From f834e8d43bfd39d2769167c4eee8b64e125843a1 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Sun, 27 Apr 2025 18:22:18 +0200 Subject: [PATCH 01/86] Update DEVELOPMENT.md Starting docu of vm setup on gcp with terraform and chef --- DEVELOPMENT.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index d061afd4936..122d73bd85f 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -10,6 +10,17 @@ This document outlines the process for setting up the development environment fo ## Setting up the Environment +Decide first if you d like a full buildsystem (chef-vm) or a containerized dev environment. +### VM as buildsystem +Uses a Ubuntu 24.04 as base to run chef to setup all dependencies. +The initial compilation is CPU intense and 16vcpu are recommended. +on GCP a balanced disk of 500 GB and a vm type that supports nested virtualization should be chosen +N2... works well. +1) Install chef and some deps + +2) Make Minikube run and deploy a vanilla pixie + +### Containerized Devenv To set up the developer environment required to start building Pixie's components, run the `run_docker.sh` script. The following script will run the Docker container and dump you out inside the docker container console from which you can run all the necessary tools to build, test, and deploy Pixie in development mode. 1. Since this script runs a Docker container, you must have Docker installed. To install it follow these instructions [here](https://docs.docker.com/get-docker/). 
From 08c5c9e95aca0e8d27a921a6e54ea5cabb870889 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Sun, 27 Apr 2025 18:22:18 +0200 Subject: [PATCH 02/86] Update DEVELOPMENT.md Starting docu of vm setup on gcp with terraform and chef Signed-off-by: entlein --- DEVELOPMENT.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index d061afd4936..122d73bd85f 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -10,6 +10,17 @@ This document outlines the process for setting up the development environment fo ## Setting up the Environment +Decide first if you d like a full buildsystem (chef-vm) or a containerized dev environment. +### VM as buildsystem +Uses a Ubuntu 24.04 as base to run chef to setup all dependencies. +The initial compilation is CPU intense and 16vcpu are recommended. +on GCP a balanced disk of 500 GB and a vm type that supports nested virtualization should be chosen +N2... works well. +1) Install chef and some deps + +2) Make Minikube run and deploy a vanilla pixie + +### Containerized Devenv To set up the developer environment required to start building Pixie's components, run the `run_docker.sh` script. The following script will run the Docker container and dump you out inside the docker container console from which you can run all the necessary tools to build, test, and deploy Pixie in development mode. 1. Since this script runs a Docker container, you must have Docker installed. To install it follow these instructions [here](https://docs.docker.com/get-docker/). 
From e40aed60af312f42e868f8be7f754614b7dddcba Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Sun, 27 Apr 2025 20:10:21 +0200 Subject: [PATCH 03/86] Update DEVELOPMENT.md From mobile phone Signed-off-by: entlein --- DEVELOPMENT.md | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 122d73bd85f..e8f44e46e83 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -16,9 +16,37 @@ Uses a Ubuntu 24.04 as base to run chef to setup all dependencies. The initial compilation is CPU intense and 16vcpu are recommended. on GCP a balanced disk of 500 GB and a vm type that supports nested virtualization should be chosen N2... works well. + +warning : the first build takes several hours and at least 160 Gb of space + 1) Install chef and some deps + +apt install +curl getchef +echo source /optpx >.bashrc +edit bazelrc and cooy to homedit +create a cache dir + +2) create a registry and authn + +3) Make Minikube run and deploy a vanilla pixie + +libvirt group +mkcert + + +4) edit skaffold build +check compilerflags + +5) golden image +if you get this all working, bake an image at this point + +notes kn cache sharing +if building in a multi user env : as long as the cache dir belongs to s group that yoir ysers are part of, the build can reuse the cache across different users + -2) Make Minikube run and deploy a vanilla pixie +notes on debugging symbols : +if you anticipate needing gdb compile with gdb , else opt ### Containerized Devenv To set up the developer environment required to start building Pixie's components, run the `run_docker.sh` script. The following script will run the Docker container and dump you out inside the docker container console from which you can run all the necessary tools to build, test, and deploy Pixie in development mode. 
From 17b9d5b9077575452a4c62c372aae65698006a6b Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 22:57:48 +0200 Subject: [PATCH 04/86] adding the build cache entry for bazel Signed-off-by: entlein --- .bazelrc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.bazelrc b/.bazelrc index 86182129958..8860295075a 100644 --- a/.bazelrc +++ b/.bazelrc @@ -1,4 +1,6 @@ # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. +# on Chef VM Use a local cache dir that belongs to the bazelcache group +build --disk_cache=/tmp/bazel/ # Use strict action env to prevent leaks of env vars. build --incompatible_strict_action_env From 652265ed739186110455d2b9819b38984b545ab6 Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 23:04:37 +0200 Subject: [PATCH 05/86] adding the compile mode into vizier skaffold Signed-off-by: entlein --- skaffold/skaffold_vizier.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/skaffold/skaffold_vizier.yaml b/skaffold/skaffold_vizier.yaml index 2b6218a8c7d..a3f61e086ad 100644 --- a/skaffold/skaffold_vizier.yaml +++ b/skaffold/skaffold_vizier.yaml @@ -144,3 +144,4 @@ profiles: path: /build/artifacts/context=./bazel/args value: - --config=x86_64_sysroot + - --compilation_mode=opt From ef467a8b9b1f27ee4693d05c1f21c6e90868ff27 Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 23:06:37 +0200 Subject: [PATCH 06/86] these should be the most important steps Signed-off-by: entlein --- DEVELOPMENT.md | 58 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index e8f44e46e83..ab7f9aa9983 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -21,32 +21,62 @@ warning : the first build takes several hours and at least 160 Gb of space 1) Install chef and some deps -apt install -curl getchef -echo source /optpx >.bashrc -edit bazelrc and cooy to homedit -create a cache dir +``` +sudo apt update sudo apt 
install -y git coreutils mkcert libnss3-tools screen libvirt-daemon-system libvirt-clients qemu-kvm virt-manager +curl -L https://chefdownload-community.chef.io/install.sh | sudo bash +Now, on this VM, clone pixie (or your fork of it) -2) create a registry and authn +``` +git clone https://github.com/pixie-io/pixie.git +cd pixie/tools/chef +sudo chef-solo -c solo.rb -j node_workstation.json +sudo usermod -aG libvirt $USER +``` + +Make permanent the env loading via your bashrc +```sh +echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc +``` + +Put the baselrc into your homedir: +```sh +cp .bazelrc ~/. +``` + +Create a cache dir under like /tmp/bazel +``` +sudo groupadd bazelcache +sudo usermod -aG bazelcache $USER +sudo mkdir -p +sudo chown :bazelcache +sudo chmod 2775 +``` + +2) Create/Use a registry you control and login + +``` +docker login `myregistry` +``` 3) Make Minikube run and deploy a vanilla pixie -libvirt group -mkcert +If you added your user to the libvirt group, this will now work: +``` +make dev-env-start +``` +4) run skaffold build to deploy (after you have the vanilla setup working on minikube) +check your docke login token is still valid -4) edit skaffold build -check compilerflags +``` +skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/k8sstormcenter +``` 5) golden image if you get this all working, bake an image at this point -notes kn cache sharing -if building in a multi user env : as long as the cache dir belongs to s group that yoir ysers are part of, the build can reuse the cache across different users -notes on debugging symbols : -if you anticipate needing gdb compile with gdb , else opt ### Containerized Devenv To set up the developer environment required to start building Pixie's components, run the `run_docker.sh` script. 
The following script will run the Docker container and dump you out inside the docker container console from which you can run all the necessary tools to build, test, and deploy Pixie in development mode. From 6730d7eedfeb962046dfc93e0c1cedc00e22d15b Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 23:11:36 +0200 Subject: [PATCH 07/86] more text Signed-off-by: entlein --- DEVELOPMENT.md | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index ab7f9aa9983..6065ac1395b 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -10,14 +10,24 @@ This document outlines the process for setting up the development environment fo ## Setting up the Environment -Decide first if you d like a full buildsystem (chef-vm) or a containerized dev environment. +Decide first if you'd like a full buildsystem (on a VM) or a containerized dev environment. + ### VM as buildsystem -Uses a Ubuntu 24.04 as base to run chef to setup all dependencies. -The initial compilation is CPU intense and 16vcpu are recommended. -on GCP a balanced disk of 500 GB and a vm type that supports nested virtualization should be chosen -N2... works well. +Uses a Ubuntu 24.04 as base to run `chef` to setup all dependencies. +The initial compilation is CPU intense and `16vcpu` are recommended. +On GCP: a balanced disk of 500 GB and a VM type that supports nested virtualization should be chosen +`n2-standard-16` works well. + +> [!Warning] +> The first build takes several hours and at least 160 Gb of space -warning : the first build takes several hours and at least 160 Gb of space +Turn on nested virtualization and dont use `spot` VMs for the first build as you do not want your very long first +build to interrupt. If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. 
+ +```yaml +advancedMachineFeatures: + enableNestedVirtualization: true +``` 1) Install chef and some deps From 1124c1fda45d3b0151dbcf2c5fc22ea820cd461a Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 23:13:00 +0200 Subject: [PATCH 08/86] comments unaligned fixed Signed-off-by: entlein --- DEVELOPMENT.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 6065ac1395b..4b9f7bed772 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -34,6 +34,7 @@ advancedMachineFeatures: ``` sudo apt update sudo apt install -y git coreutils mkcert libnss3-tools screen libvirt-daemon-system libvirt-clients qemu-kvm virt-manager curl -L https://chefdownload-community.chef.io/install.sh | sudo bash +``` Now, on this VM, clone pixie (or your fork of it) ``` From 889de799d5e7cae745a38bdb38801ef341757c9d Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 23:19:11 +0200 Subject: [PATCH 09/86] should be all now Signed-off-by: entlein --- DEVELOPMENT.md | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 4b9f7bed772..7ad701d938f 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -55,7 +55,7 @@ cp .bazelrc ~/. 
``` Create a cache dir under like /tmp/bazel -``` +```sh sudo groupadd bazelcache sudo usermod -aG bazelcache $USER sudo mkdir -p @@ -65,26 +65,33 @@ sudo chmod 2775 2) Create/Use a registry you control and login -``` -docker login `myregistry` +```sh +docker login ghcr.io/myregistry ``` 3) Make Minikube run and deploy a vanilla pixie If you added your user to the libvirt group, this will now work: -``` +```sh make dev-env-start ``` - -4) run skaffold build to deploy (after you have the vanilla setup working on minikube) -check your docke login token is still valid - +Deploy vanilla pixie (remote cloud) +```sh +sudo bash -c "$(curl -fsSL https://getcosmic.ai/install.sh)" +export PX_CLOUD_ADDR=getcosmic.ai +px auth +px deploy -p=1Gi ``` +4) Skaffold to deploy (after you have the vanilla setup working on minikube) + +your docker login token must still be valid + +```sh skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/k8sstormcenter ``` -5) golden image -if you get this all working, bake an image at this point +5) Golden image +Once all the above is working and the first cache has been build, bake an image of your VM for safekeeping. From 4036120806f7e1e3a8a1ab8a4886d359bea4106a Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 27 Apr 2025 23:20:12 +0200 Subject: [PATCH 10/86] should be all now Signed-off-by: entlein --- DEVELOPMENT.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 7ad701d938f..cbf81b280f2 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -91,6 +91,7 @@ skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo= ``` 5) Golden image + Once all the above is working and the first cache has been build, bake an image of your VM for safekeeping. 
From 6ee0c1bfed24a5a0526ac192b843a68d9785883c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mehmet=20Berk=20G=C3=BCr=C3=A7ay?= Date: Fri, 2 May 2025 20:57:42 +0000 Subject: [PATCH 11/86] review development.md, and add extra comments Signed-off-by: entlein --- DEVELOPMENT.md | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index cbf81b280f2..cc3672d46ec 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -66,7 +66,7 @@ sudo chmod 2775 2) Create/Use a registry you control and login ```sh -docker login ghcr.io/myregistry +docker login ghcr.io/ ``` 3) Make Minikube run and deploy a vanilla pixie @@ -82,12 +82,20 @@ export PX_CLOUD_ADDR=getcosmic.ai px auth px deploy -p=1Gi ``` -4) Skaffold to deploy (after you have the vanilla setup working on minikube) +For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud + +4) Once you make changes on source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) your docker login token must still be valid ```sh -skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/k8sstormcenter +> skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/ +``` + +Optional: you can set default-repo on config, so that you don't need to pass it as an argument everytime +```sh +> skaffold config set default-repo ghcr.io/ +> skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot ``` 5) Golden image From b01f03e49efdf782dd539531184116ae4775ab74 Mon Sep 17 00:00:00 2001 From: entlein Date: Sat, 3 May 2025 11:42:09 +0200 Subject: [PATCH 12/86] chore: cosmetic beautification Signed-off-by: entlein --- DEVELOPMENT.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index cc3672d46ec..607227672f8 100644 --- a/DEVELOPMENT.md +++ 
b/DEVELOPMENT.md @@ -15,21 +15,23 @@ Decide first if you'd like a full buildsystem (on a VM) or a containerized dev e ### VM as buildsystem Uses a Ubuntu 24.04 as base to run `chef` to setup all dependencies. The initial compilation is CPU intense and `16vcpu` are recommended. -On GCP: a balanced disk of 500 GB and a VM type that supports nested virtualization should be chosen +This was tested on GCP: a balanced disk of 500 GB and a VM type that supports nested virtualization should be chosen, as of writing (May 2025) `n2-standard-16` works well. > [!Warning] > The first build takes several hours and at least 160 Gb of space -Turn on nested virtualization and dont use `spot` VMs for the first build as you do not want your very long first -build to interrupt. If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. +Turn on nested virtualization and avoid the use of `spot` VMs for the first build to avoid the very long first +build interrupting. If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. + + ```yaml advancedMachineFeatures: enableNestedVirtualization: true ``` -1) Install chef and some deps +1) Install chef and some dependencies ``` sudo apt update sudo apt install -y git coreutils mkcert libnss3-tools screen libvirt-daemon-system libvirt-clients qemu-kvm virt-manager @@ -53,7 +55,7 @@ Put the baselrc into your homedir: ```sh cp .bazelrc ~/. ``` - +In order to very significantly speed up your work, you may opt for a local cache directory. This can be shared between users of the VM, if both are part of the same group. 
Create a cache dir under like /tmp/bazel ```sh sudo groupadd bazelcache @@ -71,7 +73,7 @@ docker login ghcr.io/ 3) Make Minikube run and deploy a vanilla pixie -If you added your user to the libvirt group, this will now work: +If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), this will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login) ```sh make dev-env-start ``` @@ -84,9 +86,9 @@ px deploy -p=1Gi ``` For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud -4) Once you make changes on source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) +4) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) -your docker login token must still be valid +Check, that your docker login token is still valid: ```sh > skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/ From eb762e3655bfcc163d334b378f7750fa549fe06c Mon Sep 17 00:00:00 2001 From: entlein Date: Sun, 4 May 2025 13:02:13 +0200 Subject: [PATCH 13/86] chore: comment out the cache dir for bazel and explain how to use it if desired Signed-off-by: entlein --- .bazelrc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.bazelrc b/.bazelrc index 8860295075a..68733e7d35c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -1,6 +1,8 @@ # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. -# on Chef VM Use a local cache dir that belongs to the bazelcache group -build --disk_cache=/tmp/bazel/ + +# Use local Cache directory if building on a VM: +# On Chef VM, create a directory and comment in the following line: +# build --disk_cache= # Optional for multi-user cache: Make this directory owned by a group name e.g. 
"bazelcache" # Use strict action env to prevent leaks of env vars. build --incompatible_strict_action_env From e9858af08dc1756cae80935be4fda21528d75a74 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 11:26:02 +0200 Subject: [PATCH 14/86] PR resolve: seperating the 24.04 specifics from the overall description Signed-off-by: entlein --- DEVELOPMENT.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 607227672f8..befe632df3d 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -13,16 +13,15 @@ This document outlines the process for setting up the development environment fo Decide first if you'd like a full buildsystem (on a VM) or a containerized dev environment. ### VM as buildsystem -Uses a Ubuntu 24.04 as base to run `chef` to setup all dependencies. -The initial compilation is CPU intense and `16vcpu` are recommended. -This was tested on GCP: a balanced disk of 500 GB and a VM type that supports nested virtualization should be chosen, as of writing (May 2025) -`n2-standard-16` works well. + +This utilizes `chef` to setup all dependencies and is based on `ubuntu`. The VM type must support nested virtualization for `minikube` to work. + + +The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025): The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convienent and overall `n2-standard-16` works well. > [!Warning] > The first build takes several hours and at least 160 Gb of space - -Turn on nested virtualization and avoid the use of `spot` VMs for the first build to avoid the very long first -build interrupting. If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. +> Turn on nested virtualization during provisioning and avoid the use of `spot` VMs for the first build to avoid the very long first build interrupting. 
If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. From 37ca63d897e99a82a0cab04541951a6736849c81 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 11:53:27 +0200 Subject: [PATCH 15/86] PR resolve: seperating the 24.04 specifics from the overall description Signed-off-by: entlein --- DEVELOPMENT.md | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index befe632df3d..08a2ed1ad47 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -31,11 +31,22 @@ advancedMachineFeatures: ``` 1) Install chef and some dependencies - + +WIP: this needs to be retested after moving it into `chef` rather than doing by hand or via init-script: +While we re-test, you may run the following install manually +```bash +sudo apt update +sudo apt install -y git coreutils mkcert libnss3-tools libvirt-daemon-system libvirt-clients qemu-kvm virt-manager ``` -sudo apt update sudo apt install -y git coreutils mkcert libnss3-tools screen libvirt-daemon-system libvirt-clients qemu-kvm virt-manager + + +```bash curl -L https://chefdownload-community.chef.io/install.sh | sudo bash ``` +You may find it helpful to use a terminal manager like `screen` or `tmux`, esp to detach the builds. 
+```bash +sudo apt install -y screen +``` Now, on this VM, clone pixie (or your fork of it) ``` From aa134b96e6f806e441a50276d04be19bf8daaef8 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 12:12:14 +0200 Subject: [PATCH 16/86] PR resolve: referencing upstream doc for cli install and cleaning up language Signed-off-by: entlein --- DEVELOPMENT.md | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 08a2ed1ad47..6bd59923eaa 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -83,20 +83,23 @@ docker login ghcr.io/ 3) Make Minikube run and deploy a vanilla pixie -If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), this will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login) +If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), starting the development environment on this VM will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login). The following command will, amongst other things, start minikube ```sh make dev-env-start ``` -Deploy vanilla pixie (remote cloud) + +Onto this minikube, we first deploy the upstream pixie (`vizier`, `kelvin` and `pem`) using the remote cloud `export PX_CLOUD_ADDR=getcosmic.ai` . 
Follow https://docs.px.dev/installing-pixie/install-schemes/cli , to install the `px` command line interface and login: +```sh +px auth login +``` + +Once, logged in, we found that limiting the memory is useful, thus after login, set the deploy option like so: ```sh -sudo bash -c "$(curl -fsSL https://getcosmic.ai/install.sh)" -export PX_CLOUD_ADDR=getcosmic.ai -px auth px deploy -p=1Gi ``` For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud -4) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) +1) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) Check, that your docker login token is still valid: From a6568b4e2360dc996f5b4716f124beb198f08ee0 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 12:16:44 +0200 Subject: [PATCH 17/86] PR resolve: markdown numbering got confused Signed-off-by: entlein --- DEVELOPMENT.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 6bd59923eaa..55fe311ec3a 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -93,15 +93,15 @@ Onto this minikube, we first deploy the upstream pixie (`vizier`, `kelvin` and ` px auth login ``` -Once, logged in, we found that limiting the memory is useful, thus after login, set the deploy option like so: +Once logged in to pixie, we found that limiting the memory is useful, thus after login, set the deploy option like so: ```sh px deploy -p=1Gi ``` For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud -1) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) +4) Once you make changes to 
the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) -Check, that your docker login token is still valid: +Check that your docker login token is still valid, then ```sh > skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/ @@ -115,7 +115,7 @@ Optional: you can set default-repo on config, so that you don't need to pass it 5) Golden image -Once all the above is working and the first cache has been build, bake an image of your VM for safekeeping. +Once all the above is working and the first cache has been built, bake an image of your VM for safekeeping. From 859063a380fbfe14302eccb426c057b8f3ada327 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 12:28:12 +0200 Subject: [PATCH 18/86] feature: moving the minikube ubuntu dependencies into chef rather than manual install, needs to be tested, do NOT MERGE Signed-off-by: entlein --- DEVELOPMENT.md | 2 +- tools/chef/cookbooks/px_dev/recipes/linux.rb | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 55fe311ec3a..b7b5bb0cf50 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -49,7 +49,7 @@ sudo apt install -y screen ``` Now, on this VM, clone pixie (or your fork of it) -``` +```bash git clone https://github.com/pixie-io/pixie.git cd pixie/tools/chef sudo chef-solo -c solo.rb -j node_workstation.json diff --git a/tools/chef/cookbooks/px_dev/recipes/linux.rb b/tools/chef/cookbooks/px_dev/recipes/linux.rb index c805c98fb20..4371576ea9d 100644 --- a/tools/chef/cookbooks/px_dev/recipes/linux.rb +++ b/tools/chef/cookbooks/px_dev/recipes/linux.rb @@ -56,6 +56,17 @@ 'qemu-system-x86', 'qemu-user-static', 'qemu-utils', + + # Minikube dependencies for kvm + 'libnss3-tools', + 'libvirt-daemon-system', + 'libvirt-clients', + 'qemu-kvm', + 'virt-manager', + + # Pixie dependencies + 'mkcert', + #'coreutils' not sure about that one, need to 
test ] apt_package apt_pkg_list do From 1c1bb8ca19a28029dad45553a57cb06a063c54dc Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 14:50:03 +0200 Subject: [PATCH 19/86] Update DEVELOPMENT.md Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index b7b5bb0cf50..55a10fa7af4 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -32,13 +32,7 @@ advancedMachineFeatures: 1) Install chef and some dependencies -WIP: this needs to be retested after moving it into `chef` rather than doing by hand or via init-script: -While we re-test, you may run the following install manually -```bash -sudo apt update -sudo apt install -y git coreutils mkcert libnss3-tools libvirt-daemon-system libvirt-clients qemu-kvm virt-manager -``` - +First, install `chef` to cook your `recipies`: ```bash curl -L https://chefdownload-community.chef.io/install.sh | sudo bash @@ -61,21 +55,31 @@ Make permanent the env loading via your bashrc echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc ``` -Put the baselrc into your homedir: -```sh -cp .bazelrc ~/. -``` + In order to very significantly speed up your work, you may opt for a local cache directory. This can be shared between users of the VM, if both are part of the same group. Create a cache dir under like /tmp/bazel ```sh sudo groupadd bazelcache sudo usermod -aG bazelcache $USER sudo mkdir -p -sudo chown :bazelcache +sudo chown -R :bazelcache sudo chmod 2775 ``` -2) Create/Use a registry you control and login +Edit the into the .bazelrc and put the it into your homedir: +``` +# Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. 
+ +# Use local Cache directory if building on a VM: +# On Chef VM, create a directory and comment in the following line: + build --disk_cache=/tmp/bazel/ # Optional for multi-user cache: Make this directory owned by a group name e.g. "bazelcache" +``` + +```sh +cp .bazelrc ~/. +``` + +1) Create/Use a registry you control and login ```sh docker login ghcr.io/ @@ -100,6 +104,11 @@ px deploy -p=1Gi For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud 4) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) + +Now, ensure that you have commented in the bazelcache-directory into the bazel config. +``` + +``` Check that your docker login token is still valid, then @@ -113,7 +122,7 @@ Optional: you can set default-repo on config, so that you don't need to pass it > skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot ``` -5) Golden image +1) Golden image Once all the above is working and the first cache has been built, bake an image of your VM for safekeeping. @@ -249,3 +258,5 @@ You will be able to run any of the CLI commands using `bazel run`. 
- `bazel run //src/pixie_cli:px -- deploy` will be equivalent to `px deploy` - `bazel run //src/pixie_cli:px -- run px/cluster` is the same as `px run px/cluster` + + From 9f31d5f169be8c6b232bb960bbee9208debf26a7 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 15:05:12 +0200 Subject: [PATCH 20/86] Update DEVELOPMENT.md Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 55a10fa7af4..91bc09d4f9d 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -41,6 +41,18 @@ You may find it helpful to use a terminal manager like `screen` or `tmux`, esp t ```bash sudo apt install -y screen ``` + +In order to very significantly speed up your work, you may opt for a local cache directory. This can be shared between users of the VM, if both are part of the same group. +Create a cache dir under like /tmp/bazel +```sh +sudo groupadd bazelcache +sudo usermod -aG bazelcache $USER +sudo mkdir -p +sudo chown -R :bazelcache +sudo chmod 2775 +``` + + Now, on this VM, clone pixie (or your fork of it) ```bash @@ -56,17 +68,8 @@ echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc ``` -In order to very significantly speed up your work, you may opt for a local cache directory. This can be shared between users of the VM, if both are part of the same group. -Create a cache dir under like /tmp/bazel -```sh -sudo groupadd bazelcache -sudo usermod -aG bazelcache $USER -sudo mkdir -p -sudo chown -R :bazelcache -sudo chmod 2775 -``` -Edit the into the .bazelrc and put the it into your homedir: +Edit the `` into the .bazelrc and put the it into your homedir: ``` # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. 
From 61ffd7eb523c1d03825e28a8999af4bf7c3254c6 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 15:06:39 +0200 Subject: [PATCH 21/86] Update DEVELOPMENT.md Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 91bc09d4f9d..c5b8eac4ba1 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -69,7 +69,7 @@ echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc -Edit the `` into the .bazelrc and put the it into your homedir: +Edit the `` into the .bazelrc and put it into your homedir: ``` # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. From f7fab712ce2e8565ba67f0c3ad778bfb4afca47e Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 15:11:28 +0200 Subject: [PATCH 22/86] Fixing the numbering and removing empty quotes Sry for all the commits Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index c5b8eac4ba1..21763fb99f9 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -68,7 +68,7 @@ echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc ``` - +2) If using Cache, tell bazel about it Edit the `` into the .bazelrc and put it into your homedir: ``` # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. @@ -82,13 +82,13 @@ Edit the `` into the .bazelrc and put it into your homedir: cp .bazelrc ~/. 
``` -1) Create/Use a registry you control and login +3) Create/Use a registry you control and login ```sh docker login ghcr.io/ ``` -3) Make Minikube run and deploy a vanilla pixie +4) Make Minikube run and deploy a vanilla pixie If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), starting the development environment on this VM will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login). The following command will, amongst other things, start minikube ```sh @@ -106,12 +106,9 @@ px deploy -p=1Gi ``` For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud -4) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) - -Now, ensure that you have commented in the bazelcache-directory into the bazel config. -``` +5) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) -``` +Ensure that you have commented in the bazelcache-directory into the bazel config (see Step 2) Check that your docker login token is still valid, then @@ -125,7 +122,7 @@ Optional: you can set default-repo on config, so that you don't need to pass it > skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot ``` -1) Golden image +6) Golden image Once all the above is working and the first cache has been built, bake an image of your VM for safekeeping. 
From 77d82f06cbcf1362074fbb299342abe387e4ee76 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 15:12:27 +0200 Subject: [PATCH 23/86] newline added Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 21763fb99f9..533cdc511e2 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -69,6 +69,8 @@ echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc 2) If using Cache, tell bazel about it + + Edit the `` into the .bazelrc and put it into your homedir: ``` # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. From 840dc97736d06f93eb0bc93cd0abab9d013d40da Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 15:20:47 +0200 Subject: [PATCH 24/86] adding a file to document how an SRE would work with Pixie while the Devs are writing different pieces and those might be on different repos --- DEVELOPMENT.md | 136 ++++++++++++++++++++++++++++++++++++++++++++++--- PLATFORM.md | 4 ++ 2 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 PLATFORM.md diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 122d73bd85f..c7521c0169e 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -10,15 +10,130 @@ This document outlines the process for setting up the development environment fo ## Setting up the Environment -Decide first if you d like a full buildsystem (chef-vm) or a containerized dev environment. +Decide first if you'd like a full buildsystem (on a VM) or a containerized dev environment. + ### VM as buildsystem -Uses a Ubuntu 24.04 as base to run chef to setup all dependencies. -The initial compilation is CPU intense and 16vcpu are recommended. -on GCP a balanced disk of 500 GB and a vm type that supports nested virtualization should be chosen -N2... works well. 
-1) Install chef and some deps -2) Make Minikube run and deploy a vanilla pixie +This utilizes `chef` to setup all dependencies and is based on `ubuntu`. The VM type must support nested virtualization for `minikube` to work. + + +The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025): The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convienent and overall `n2-standard-16` works well. + +> [!Warning] +> The first build takes several hours and at least 160 Gb of space +> Turn on nested virtualization during provisioning and avoid the use of `spot` VMs for the first build to avoid the very long first build interrupting. If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. + + + +```yaml +advancedMachineFeatures: + enableNestedVirtualization: true +``` + +1) Install chef and some dependencies + +WIP: this needs to be retested after moving it into `chef` rather than doing by hand or via init-script: +While we re-test, you may run the following install manually +```bash +sudo apt update +sudo apt install -y git coreutils mkcert libnss3-tools libvirt-daemon-system libvirt-clients qemu-kvm virt-manager +``` + + +```bash +curl -L https://chefdownload-community.chef.io/install.sh | sudo bash +``` +You may find it helpful to use a terminal manager like `screen` or `tmux`, esp to detach the builds. +```bash +sudo apt install -y screen +``` +Now, on this VM, clone pixie (or your fork of it) + +```bash +git clone https://github.com/pixie-io/pixie.git +cd pixie/tools/chef +sudo chef-solo -c solo.rb -j node_workstation.json +sudo usermod -aG libvirt $USER +``` + +Make permanent the env loading via your bashrc +```sh +echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc +``` + + +In order to very significantly speed up your work, you may opt for a local cache directory. This can be shared between users of the VM, if both are part of the same group. 
+Create a cache dir under like /tmp/bazel +```sh +sudo groupadd bazelcache +sudo usermod -aG bazelcache $USER +sudo mkdir -p +sudo chown -R :bazelcache +sudo chmod 2775 +``` + +Edit the into the .bazelrc and put the it into your homedir: +``` +# Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. + +# Use local Cache directory if building on a VM: +# On Chef VM, create a directory and comment in the following line: + build --disk_cache=/tmp/bazel/ # Optional for multi-user cache: Make this directory owned by a group name e.g. "bazelcache" +``` + +```sh +cp .bazelrc ~/. +``` + +1) Create/Use a registry you control and login + +```sh +docker login ghcr.io/ +``` + +3) Make Minikube run and deploy a vanilla pixie + +If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), starting the development environment on this VM will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login). The following command will, amongst other things, start minikube +```sh +make dev-env-start +``` + +Onto this minikube, we first deploy the upstream pixie (`vizier`, `kelvin` and `pem`) using the remote cloud `export PX_CLOUD_ADDR=getcosmic.ai` . Follow https://docs.px.dev/installing-pixie/install-schemes/cli , to install the `px` command line interface and login: +```sh +px auth login +``` + +Once logged in to pixie, we found that limiting the memory is useful, thus after login, set the deploy option like so: +```sh +px deploy -p=1Gi +``` +For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud + +4) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) + +Now, ensure that you have commented in the bazelcache-directory into the bazel config. 
+``` + +``` + +Check that your docker login token is still valid, then + +```sh +> skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/ +``` + +Optional: you can set default-repo on config, so that you don't need to pass it as an argument everytime +```sh +> skaffold config set default-repo ghcr.io/ +> skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot +``` + +1) Golden image + +Once all the above is working and the first cache has been built, bake an image of your VM for safekeeping. + + + ### Containerized Devenv To set up the developer environment required to start building Pixie's components, run the `run_docker.sh` script. The following script will run the Docker container and dump you out inside the docker container console from which you can run all the necessary tools to build, test, and deploy Pixie in development mode. @@ -149,3 +264,10 @@ You will be able to run any of the CLI commands using `bazel run`. - `bazel run //src/pixie_cli:px -- deploy` will be equivalent to `px deploy` - `bazel run //src/pixie_cli:px -- run px/cluster` is the same as `px run px/cluster` + + +# Using a Custom Pixie without Development Environment +This section is on deploying pixie when it is in a state where parts are official and parts are self-developped, without setting up the Development environment + +First, get yourself a kubernetes and have helm, kubectl and your favourite tools in your favourite places. + diff --git a/PLATFORM.md b/PLATFORM.md new file mode 100644 index 00000000000..4490f773e2d --- /dev/null +++ b/PLATFORM.md @@ -0,0 +1,4 @@ +# Using a Custom Pixie without Development Environment +This section is on deploying pixie when it is in a state where parts are official and parts are self-developped, without setting up the Development environment + +First, get yourself a kubernetes and have helm, kubectl and your favourite tools in your favourite places. 
\ No newline at end of file From 46de0bdefc32c479328a7583759ee31bbefa736f Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 15:41:58 +0200 Subject: [PATCH 25/86] I thought i already commited this Signed-off-by: entlein --- PLATFORM.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/PLATFORM.md b/PLATFORM.md index 4490f773e2d..e04091a0223 100644 --- a/PLATFORM.md +++ b/PLATFORM.md @@ -1,4 +1,5 @@ # Using a Custom Pixie without Development Environment This section is on deploying pixie when it is in a state where parts are official and parts are self-developped, without setting up the Development environment -First, get yourself a kubernetes and have helm, kubectl and your favourite tools in your favourite places. \ No newline at end of file +First, get yourself a kubernetes and have helm, kubectl and your favourite tools in your favourite places. + From 3d95116a7f6a4ebc1007018a9500ca61a38f6063 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 16:33:28 +0200 Subject: [PATCH 26/86] Fixed -R for recursive setgid bit without the perms will not be inherited Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 533cdc511e2..a1869f68059 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -49,7 +49,7 @@ sudo groupadd bazelcache sudo usermod -aG bazelcache $USER sudo mkdir -p sudo chown -R :bazelcache -sudo chmod 2775 +sudo chmod -R 2775 ``` From 085e27bf96de057066830b999234d7e270637e76 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Wed, 7 May 2025 17:16:02 +0200 Subject: [PATCH 27/86] Adding the missing kernel header warning explanation for minikube Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/DEVELOPMENT.md 
b/DEVELOPMENT.md index a1869f68059..c5c918fe747 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -106,7 +106,12 @@ Once logged in to pixie, we found that limiting the memory is useful, thus after ```sh px deploy -p=1Gi ``` -For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud +For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud. + +You may encounter the following WARNING, which is related to the kernel headers missing on the minikube node (this is not your VM node). Usually, for development purposes this is safe to ignore. Please see [pixie-issue2051](https://github.com/pixie-io/pixie/issues/2051) for further details. +``` +ERR: Detected missing kernel headers on your cluster's nodes. This may cause issues with the Pixie agent. Please install kernel headers on all nodes. +``` 5) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) From c56fc280c1f947a24bc865a59950e6907b1677e4 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 18:36:29 +0200 Subject: [PATCH 28/86] test if this is a feasible approach to overwrite the skaffold produced artefacts Signed-off-by: entlein --- vizier-chart/Chart.yaml | 4 + vizier-chart/templates/00_secrets.yaml | 100 + vizier-chart/templates/01_nats.yaml | 246 ++ vizier-chart/templates/02_etcd.yaml | 238 ++ vizier-chart/templates/03_vizier_etcd.yaml | 2303 ++++++++++++++++ .../templates/04_vizier_persistent.yaml | 2337 ++++++++++++++++ vizier-chart/templates/05_vizier_etcd_ap.yaml | 2324 ++++++++++++++++ .../templates/06_vizier_persistent_ap.yaml | 2358 +++++++++++++++++ vizier-chart/templates/image-replace.sh | 63 + vizier-chart/values.yaml | 5 + 10 files changed, 9978 insertions(+) create mode 100644 vizier-chart/Chart.yaml create mode 100644 vizier-chart/templates/00_secrets.yaml create mode 
100644 vizier-chart/templates/01_nats.yaml create mode 100644 vizier-chart/templates/02_etcd.yaml create mode 100644 vizier-chart/templates/03_vizier_etcd.yaml create mode 100644 vizier-chart/templates/04_vizier_persistent.yaml create mode 100644 vizier-chart/templates/05_vizier_etcd_ap.yaml create mode 100644 vizier-chart/templates/06_vizier_persistent_ap.yaml create mode 100755 vizier-chart/templates/image-replace.sh create mode 100644 vizier-chart/values.yaml diff --git a/vizier-chart/Chart.yaml b/vizier-chart/Chart.yaml new file mode 100644 index 00000000000..b91fc292c74 --- /dev/null +++ b/vizier-chart/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +name: vizier-chart +type: application +version: 0.14.15 diff --git a/vizier-chart/templates/00_secrets.yaml b/vizier-chart/templates/00_secrets.yaml new file mode 100644 index 00000000000..f87370a1825 --- /dev/null +++ b/vizier-chart/templates/00_secrets.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: v1 +data: + PL_CLOUD_ADDR: {{ if .Values.cloudAddr }}"{{ .Values.cloudAddr }}"{{ else }}"withpixie.ai:443"{{ end }} + PL_CLUSTER_NAME: "{{ .Values.clusterName }}" + PL_UPDATE_CLOUD_ADDR: {{ if .Values.cloudUpdateAddr }}"{{ .Values.cloudUpdateAddr }}"{{ else }}"withpixie.ai:443"{{ end }} +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + name: pl-cloud-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CUSTOM_ANNOTATIONS: "{{ .Values.customAnnotations }}" + PL_CUSTOM_LABELS: "{{ .Values.customLabels }}" + 
PL_DISABLE_AUTO_UPDATE: {{ if .Values.disableAutoUpdate }}"{{ .Values.disableAutoUpdate }}"{{ else }}"false"{{ end }} + PL_ETCD_OPERATOR_ENABLED: {{ if .Values.useEtcdOperator }}"true"{{else}}"false"{{end}} + PL_MD_ETCD_SERVER: https://pl-etcd-client.{{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }}.svc:2379 + PX_MEMORY_LIMIT: "{{ .Values.pemMemoryLimit }}" + PX_MEMORY_REQUEST: "{{ .Values.pemMemoryRequest }}" +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + name: pl-cluster-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: Secret +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + name: pl-cluster-secrets + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +stringData: + sentry-dsn: "{{ .Values.sentryDSN }}" +--- +apiVersion: v1 +kind: Secret +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + 
{{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + name: pl-deploy-secrets + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +stringData: + deploy-key: "{{ .Values.deployKey }}" diff --git a/vizier-chart/templates/01_nats.yaml b/vizier-chart/templates/01_nats.yaml new file mode 100644 index 00000000000..29aedb8877f --- /dev/null +++ b/vizier-chart/templates/01_nats.yaml @@ -0,0 +1,246 @@ +--- +apiVersion: v1 +data: + nats.conf: | + pid_file: "/var/run/nats/nats.pid" + http: 8222 + + tls { + ca_file: "/etc/nats-server-tls-certs/ca.crt", + cert_file: "/etc/nats-server-tls-certs/server.crt", + key_file: "/etc/nats-server-tls-certs/server.key", + timeout: 3 + verify: true + } +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + name: nats-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} 
+ {{end}}{{end}} + app: pl-monitoring + name: pl-nats + name: pl-nats + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: client + port: 4222 + selector: + app: pl-monitoring + name: pl-nats +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + name: pl-nats + name: pl-nats-mgmt + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + clusterIP: None + ports: + - name: cluster + port: 6222 + - name: monitor + port: 8222 + - name: metrics + port: 7777 + - name: leafnodes + port: 7422 + - name: gateways + port: 7522 + selector: + app: pl-monitoring + name: pl-nats +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + name: pl-nats + name: pl-nats + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + name: pl-nats + serviceName: pl-nats + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + name: pl-nats + plane: control + spec: + containers: + - command: + - nats-server + - --config + - /etc/nats-config/nats.conf + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CLUSTER_ADVERTISE + value: $(POD_NAME).pl-nats.$(POD_NAMESPACE).svc + image: '{{ if .Values.registry }}{{ .Values.registry }}/gcr.io-pixie-oss-pixie-prod-vizier-deps-nats:2.9.19-scratch@sha256:5de59286eb54ead4d4a9279846098d4097b9c17a3c0588182398a7250cde1af9{{else}}gcr.io/pixie-oss/pixie-prod/vizier-deps/nats:2.9.19-scratch@sha256:5de59286eb54ead4d4a9279846098d4097b9c17a3c0588182398a7250cde1af9{{end}}' + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - /nats-server -sl=ldm=/var/run/nats/nats.pid && /bin/sleep 60 + livenessProbe: + httpGet: + path: / + port: 8222 + initialDelaySeconds: 10 + timeoutSeconds: 5 + name: pl-nats + ports: + - containerPort: 4222 + name: client + - containerPort: 7422 + name: leafnodes + - containerPort: 6222 + name: cluster + - containerPort: 8222 + name: monitor + - containerPort: 7777 + name: metrics + readinessProbe: + httpGet: + path: / + port: 8222 + initialDelaySeconds: 10 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/nats-config + name: config-volume + - mountPath: /etc/nats-server-tls-certs + name: nats-server-tls-volume + - mountPath: /var/run/nats + name: pid + securityContext: + fsGroup: 10100 + runAsGroup: 
10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + shareProcessNamespace: true + terminationGracePeriodSeconds: 60 + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: nats-server-tls-volume + secret: + secretName: service-tls-certs + - configMap: + name: nats-config + name: config-volume + - emptyDir: {} + name: pid diff --git a/vizier-chart/templates/02_etcd.yaml b/vizier-chart/templates/02_etcd.yaml new file mode 100644 index 00000000000..4f514ee8aaa --- /dev/null +++ b/vizier-chart/templates/02_etcd.yaml @@ -0,0 +1,238 @@ +{{if .Values.useEtcdOperator}} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + etcd_cluster: pl-etcd + name: pl-etcd + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + clusterIP: None + ports: + - name: client + port: 2379 + - name: peer + port: 2380 + publishNotReadyAddresses: true + selector: + app: pl-monitoring + etcd_cluster: pl-etcd +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + 
{{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + etcd_cluster: pl-etcd + name: pl-etcd-client + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: etcd-client + port: 2379 + selector: + app: pl-monitoring + etcd_cluster: pl-etcd +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + etcd_cluster: pl-etcd + name: pl-etcd + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + app: pl-monitoring + etcd_cluster: pl-etcd + serviceName: pl-etcd + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + etcd_cluster: pl-etcd + plane: control + name: pl-etcd + spec: + containers: + - env: + - name: INITIAL_CLUSTER_SIZE + value: "3" + - name: CLUSTER_NAME + value: pl-etcd + - name: ETCDCTL_API + value: "3" + - name: 
POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DATA_DIR + value: /var/run/etcd + - name: ETCD_AUTO_COMPACTION_RETENTION + value: "5" + - name: ETCD_AUTO_COMPACTION_MODE + value: revision + image: '{{ if .Values.registry }}{{ .Values.registry }}/gcr.io-pixie-oss-pixie-dev-public-etcd:3.5.9@sha256:e18afc6dda592b426834342393c4c4bd076cb46fa7e10fa7818952cae3047ca9{{else}}gcr.io/pixie-oss/pixie-dev-public/etcd:3.5.9@sha256:e18afc6dda592b426834342393c4c4bd076cb46fa7e10fa7818952cae3047ca9{{end}}' + lifecycle: + preStop: + exec: + command: + - /etc/etcd/scripts/prestop.sh + livenessProbe: + exec: + command: + - /etc/etcd/scripts/healthcheck.sh + failureThreshold: 5 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + name: etcd + ports: + - containerPort: 2379 + name: client + - containerPort: 2380 + name: server + readinessProbe: + exec: + command: + - /etc/etcd/scripts/healthcheck.sh + failureThreshold: 3 + initialDelaySeconds: 1 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + capabilities: + add: + - NET_RAW + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/etcd + name: etcd-data + - mountPath: /etc/etcdtls/member/peer-tls + name: member-peer-tls + - mountPath: /etc/etcdtls/member/server-tls + name: member-server-tls + - mountPath: /etc/etcdtls/client/etcd-tls + name: etcd-client-tls + securityContext: + seccompProfile: + type: RuntimeDefault + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: member-peer-tls + secret: + secretName: etcd-peer-tls-certs + - name: member-server-tls + secret: + secretName: etcd-server-tls-certs + - 
name: etcd-client-tls + secret: + secretName: etcd-client-tls-certs + - emptyDir: {} + name: etcd-data +--- +apiVersion: {{ if .Values.useBetaPdbVersion }}"policy/v1beta1"{{ else }}"policy/v1"{{ end }} +kind: PodDisruptionBudget +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + name: pl-etcd-pdb + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + minAvailable: 51% + selector: + matchLabels: + app: pl-monitoring + etcd_cluster: pl-etcd + +{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml new file mode 100644 index 00000000000..47eeb6bb6e5 --- /dev/null +++ b/vizier-chart/templates/03_vizier_etcd.yaml @@ -0,0 +1,2303 @@ +{{if and (not .Values.autopilot) .Values.useEtcdOperator}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + 
annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- 
+apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + 
{{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-ns-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - services + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - pods + - services + - persistentvolumes + - persistentvolumeclaims + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - deployments + 
- daemonsets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + - metadata-election + resources: + - leases + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers/status + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-vizier-crd-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - px.dev + resources: + - viziers + - viziers/status + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - metadata-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring 
+ component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + - pods + - services + - endpoints + - namespaces + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - namespaces + verbs: + - watch + - get + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: Role + name: pl-cert-provisioner-role +subjects: +- kind: ServiceAccount + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-cloud-connector-ns-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-updater-role +subjects: 
+- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-metadata-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-query-broker-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element 
:= split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-cloud-connector-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" 
$element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-updater-cluster-role +subjects: +- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + 
labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-vizier-metadata +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} 
+ {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin-service + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + clusterIP: None + ports: + - name: 
tcp-http2 + port: 59300 + protocol: TCP + targetPort: 59300 + selector: + app: pl-monitoring + component: vizier + name: kelvin + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50800 + protocol: TCP + targetPort: 50800 + selector: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50400 + protocol: TCP + targetPort: 50400 + selector: + app: pl-monitoring + component: vizier + name: vizier-metadata + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50300 + protocol: TCP + targetPort: 50300 + - name: tcp-grpc-web + port: 50305 + protocol: TCP + targetPort: 50305 + selector: + app: pl-monitoring + component: vizier + name: vizier-query-broker + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: kelvin + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: 
pl-monitoring + component: vizier + name: kelvin + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - name: PL_HOST_PATH + value: /host + - name: PL_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + name: app + ports: + - containerPort: 59300 + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + - mountPath: /sys + name: sys + readOnly: true + initContainers: + - command: + - sh + - -c + - 'set -x; 
URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + 
operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - hostPath: + path: /sys + type: Directory + name: sys +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + plane: control + vizier-bootstrap: "true" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + 
operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_DEPLOY_KEY + valueFrom: + secretKeyRef: + key: deploy-key + name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cloud-connector-tls-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50800 + scheme: HTTPS + name: app + ports: + - containerPort: 50800 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if 
.Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: nats-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: cloud-conn-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-metadata + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" 
$element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50400" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + - name: PL_MD_ETCD_SERVER + value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 + - name: PL_ETCD_OPERATOR_ENABLED + value: "true" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 120 + periodSeconds: 10 + name: app + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - 
-c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: nats-wait + - command: + - sh + - -c + - set -xe; ETCD_PATH="${PL_MD_ETCD_SERVER}"; URL="${ETCD_PATH}${HEALTH_PATH}"; + until [ $(curl --cacert /certs/ca.crt --key /certs/client.key --cert /certs/client.crt + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; + env: + - name: HEALTH_PATH + value: /health + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MD_ETCD_SERVER + value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: etcd-wait + volumeMounts: + - mountPath: /certs + name: certs + serviceAccountName: metadata-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - 
effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-query-broker + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50300" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_CLOUD_ADDR + valueFrom: + configMapKeyRef: + key: PL_CLOUD_ADDR + name: pl-cloud-config + - name: PL_DATA_ACCESS + value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50300 + scheme: HTTPS + name: app + ports: + - containerPort: 50300 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + 
securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-metadata-svc + - name: SERVICE_PORT + value: "50400" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: mds-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: query-broker-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - configMap: + name: proxy-envoy-config + name: envoy-yaml +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" 
$element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-pem + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - args: [] + env: + - name: PL_PEM_ENV_VAR_PLACEHOLDER + value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. 
+ {{- range $key, $value := .Values.customPEMFlags}} + - name: {{$key}} + value: "{{$value}}" + {{- end}} + {{- if .Values.datastreamBufferSpikeSize }} + - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE + value: "{{ .Values.datastreamBufferSpikeSize }}" + {{- end}} + {{- if .Values.datastreamBufferSize }} + - name: PL_DATASTREAM_BUFFER_SIZE + value: "{{ .Values.datastreamBufferSize }}" + {{- end}} + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + - name: PL_CLIENT_TLS_CERT + value: /certs/client.crt + - name: PL_CLIENT_TLS_KEY + value: /certs/client.key + - name: PL_TLS_CA_CERT + value: /certs/ca.crt + - name: PL_DISABLE_SSL + value: "false" + - name: PL_HOST_PATH + value: /host + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_CLOCK_CONVERTER + value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + name: pem + resources: + limits: + memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} + requests: + memory: {{ if .Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} + securityContext: + capabilities: + add: + - SYS_PTRACE + - SYS_ADMIN + privileged: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /host + name: host-root + 
readOnly: true + - mountPath: /sys + name: sys + readOnly: true + - mountPath: /certs + name: certs + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: host-root + - hostPath: + path: /sys + type: Directory + name: sys + - name: certs + secret: + secretName: service-tls-certs + updateStrategy: + rollingUpdate: + maxUnavailable: 20 + type: RollingUpdate +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- 
end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + backoffLimit: 1 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + spec: + containers: + - env: + - name: PL_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + name: provisioner + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: pl-cert-provisioner-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + +{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml new file mode 100644 index 00000000000..815d4c3c6d9 --- /dev/null +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -0,0 +1,2337 @@ +{{if and (not .Values.autopilot) (not .Values.useEtcdOperator)}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: 
ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-role + namespace: {{ if .Release.Namespace }}{{ 
.Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-ns-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - services + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv 
:= split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - pods + - services + - persistentvolumes + - persistentvolumeclaims + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + - metadata-election + resources: + - leases + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers/status + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} 
+ {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-vizier-crd-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - px.dev + resources: + - viziers + - viziers/status + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - metadata-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-role + namespace: {{ if .Release.Namespace }}{{ 
.Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" 
$element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + - pods + - services + - endpoints + - namespaces + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - namespaces + verbs: + - watch + - get + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding 
+metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-cert-provisioner-role +subjects: +- kind: ServiceAccount + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-cloud-connector-ns-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-updater-role +subjects: +- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + 
{{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-metadata-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," 
.Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-query-broker-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ 
$kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-cloud-connector-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + 
vizier-bootstrap: "true" + name: pl-updater-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-updater-cluster-role +subjects: +- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-vizier-metadata +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-node-view-cluster-binding + namespace: {{ if 
.Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-tls-config + namespace: {{ if .Release.Namespace }}{{ 
.Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin-service + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + clusterIP: None + ports: + - name: tcp-http2 + port: 59300 + protocol: TCP + targetPort: 59300 + selector: + app: pl-monitoring + component: vizier + name: kelvin + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50800 + protocol: TCP + targetPort: 50800 + selector: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 
}}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50400 + protocol: TCP + targetPort: 50400 + selector: + app: pl-monitoring + component: vizier + name: vizier-metadata + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50300 + protocol: TCP + targetPort: 50300 + - name: tcp-grpc-web + port: 50305 + protocol: TCP + targetPort: 50305 + selector: + app: pl-monitoring + component: vizier + name: vizier-query-broker + type: ClusterIP +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + 
{{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-pv-claim + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 16Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: kelvin + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: 
PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - name: PL_HOST_PATH + value: /host + - name: PL_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + name: app + ports: + - containerPort: 59300 + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + - mountPath: /sys + name: sys + readOnly: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - hostPath: + path: /sys + type: Directory + name: sys +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + plane: control + vizier-bootstrap: "true" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + 
secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_DEPLOY_KEY + valueFrom: + secretKeyRef: + key: deploy-key + name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cloud-connector-tls-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50800 + scheme: HTTPS + name: app + ports: + - containerPort: 50800 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + 
name: nats-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: cloud-conn-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-query-broker + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50300" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: 
"{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_CLOUD_ADDR + valueFrom: + configMapKeyRef: + key: PL_CLOUD_ADDR + name: pl-cloud-config + - name: PL_DATA_ACCESS + value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50300 + scheme: HTTPS + name: app + ports: + - containerPort: 50300 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 
0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-metadata-svc + - name: SERVICE_PORT + value: "50400" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: mds-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: query-broker-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: 
kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - configMap: + name: proxy-envoy-config + name: envoy-yaml +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-metadata + serviceName: vizier-metadata + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50400" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: 
PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + - name: PL_ETCD_OPERATOR_ENABLED + value: "false" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 120 + periodSeconds: 10 + name: app + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + - mountPath: /metadata + name: metadata-volume + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: nats-wait + securityContext: + 
allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: metadata-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - name: metadata-volume + persistentVolumeClaim: + claimName: metadata-pv-claim + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-pem + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" 
+ {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - args: [] + env: + - name: PL_PEM_ENV_VAR_PLACEHOLDER + value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. + {{- range $key, $value := .Values.customPEMFlags}} + - name: {{$key}} + value: "{{$value}}" + {{- end}} + {{- if .Values.datastreamBufferSpikeSize }} + - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE + value: "{{ .Values.datastreamBufferSpikeSize }}" + {{- end}} + {{- if .Values.datastreamBufferSize }} + - name: PL_DATASTREAM_BUFFER_SIZE + value: "{{ .Values.datastreamBufferSize }}" + {{- end}} + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + - name: PL_CLIENT_TLS_CERT + value: /certs/client.crt + - name: PL_CLIENT_TLS_KEY + value: /certs/client.key + - name: PL_TLS_CA_CERT + value: /certs/ca.crt + - name: PL_DISABLE_SSL + value: "false" + - name: PL_HOST_PATH + value: /host + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_CLOCK_CONVERTER + value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter 
}}"{{else}}"default"{{end}} + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + name: pem + resources: + limits: + memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} + requests: + memory: {{ if .Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} + securityContext: + capabilities: + add: + - SYS_PTRACE + - SYS_ADMIN + privileged: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /host + name: host-root + readOnly: true + - mountPath: /sys + name: sys + readOnly: true + - mountPath: /certs + name: certs + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: host-root + - hostPath: + path: 
/sys + type: Directory + name: sys + - name: certs + secret: + secretName: service-tls-certs + updateStrategy: + rollingUpdate: + maxUnavailable: 20 + type: RollingUpdate +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + backoffLimit: 1 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + spec: + containers: + - env: + - name: PL_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + name: provisioner + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: pl-cert-provisioner-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: 
Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + +{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml new file mode 100644 index 00000000000..6d456aec391 --- /dev/null +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -0,0 +1,2324 @@ +{{if and (.Values.autopilot) (.Values.useEtcdOperator)}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := 
split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + 
annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-ns-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - services + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - 
coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - pods + - services + - persistentvolumes + - persistentvolumeclaims + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + - metadata-election + resources: + - leases + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers/status + verbs: + - get + - list + - watch +- apiGroups: + - 
rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-vizier-crd-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - px.dev + resources: + - viziers + - viziers/status + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - metadata-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role 
+metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := 
split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + - pods + - services + - endpoints + - namespaces + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," 
.Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - namespaces + verbs: + - watch + - get + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-cert-provisioner-role +subjects: +- kind: ServiceAccount + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + 
{{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-cloud-connector-ns-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-updater-role +subjects: +- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: 
pl-vizier-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + 
kind: Role + name: pl-vizier-metadata-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-query-broker-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + 
namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-cloud-connector-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-updater-cluster-role +subjects: +- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-vizier-metadata +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding 
+metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap 
+metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin-service + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + clusterIP: None + ports: + - name: tcp-http2 + port: 59300 + protocol: TCP + targetPort: 59300 + selector: + app: pl-monitoring + component: vizier + name: kelvin + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + 
name: vizier-cloud-connector-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50800 + protocol: TCP + targetPort: 50800 + selector: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50400 + protocol: TCP + targetPort: 50400 + selector: + app: pl-monitoring + component: vizier + name: vizier-metadata + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50300 + protocol: TCP + targetPort: 50300 + - name: tcp-grpc-web + port: 50305 + protocol: TCP + targetPort: 50305 + selector: + app: 
pl-monitoring + component: vizier + name: vizier-query-broker + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: kelvin + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: 
pl-cluster-secrets + optional: true + - name: PL_HOST_PATH + value: /host + - name: PL_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + name: app + ports: + - containerPort: 59300 + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + - mountPath: /sys + name: sys + readOnly: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + 
capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - hostPath: + path: /sys + type: Directory + name: sys +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" 
+ {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + plane: control + vizier-bootstrap: "true" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_DEPLOY_KEY + valueFrom: + secretKeyRef: + key: deploy-key + name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: 
PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cloud-connector-tls-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50800 + scheme: HTTPS + name: app + ports: + - containerPort: 50800 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: nats-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: cloud-conn-service-account + tolerations: + - effect: NoSchedule + key: 
kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-metadata + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50400" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: 
+ - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + - name: PL_MD_ETCD_SERVER + value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 + - name: PL_ETCD_OPERATOR_ENABLED + value: "true" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 120 + periodSeconds: 10 + name: app + readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' 
+ name: nats-wait + - command: + - sh + - -c + - set -xe; ETCD_PATH="${PL_MD_ETCD_SERVER}"; URL="${ETCD_PATH}${HEALTH_PATH}"; + until [ $(curl --cacert /certs/ca.crt --key /certs/client.key --cert /certs/client.crt + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; + env: + - name: HEALTH_PATH + value: /health + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MD_ETCD_SERVER + value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: etcd-wait + volumeMounts: + - mountPath: /certs + name: certs + serviceAccountName: metadata-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + 
namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-query-broker + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50300" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_CLOUD_ADDR + valueFrom: + configMapKeyRef: + key: PL_CLOUD_ADDR + name: pl-cloud-config + - name: PL_DATA_ACCESS + value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} + envFrom: + - 
configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50300 + scheme: HTTPS + name: app + ports: + - containerPort: 50300 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-metadata-svc + - name: SERVICE_PORT + value: "50400" + image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: mds-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: query-broker-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - configMap: + name: proxy-envoy-config + name: envoy-yaml +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-pem + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element 
-}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - args: [] + env: + - name: PL_PEM_ENV_VAR_PLACEHOLDER + value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. + {{- range $key, $value := .Values.customPEMFlags}} + - name: {{$key}} + value: "{{$value}}" + {{- end}} + {{- if .Values.datastreamBufferSpikeSize }} + - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE + value: "{{ .Values.datastreamBufferSpikeSize }}" + {{- end}} + {{- if .Values.datastreamBufferSize }} + - name: PL_DATASTREAM_BUFFER_SIZE + value: "{{ .Values.datastreamBufferSize }}" + {{- end}} + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + - name: PL_CLIENT_TLS_CERT + value: /certs/client.crt + - name: PL_CLIENT_TLS_KEY + value: /certs/client.key + - name: PL_TLS_CA_CERT + value: /certs/ca.crt + - name: PL_DISABLE_SSL + value: "false" + - name: PL_HOST_PATH + value: /host + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: 
+ key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_CLOCK_CONVERTER + value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + name: pem + resources: + limits: + memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} + requests: + memory: {{ if .Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} + securityContext: + capabilities: + add: + - SYS_PTRACE + - SYS_ADMIN + privileged: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /host/lib + name: host-lib + readOnly: true + - mountPath: /host/var + name: host-var + readOnly: true + - mountPath: /host/boot + name: host-boot + readOnly: true + - mountPath: /host/etc + name: host-etc + readOnly: true + - mountPath: /sys + name: sys + readOnly: true + - mountPath: /certs + name: certs + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: 
+ allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /lib + type: Directory + name: host-lib + - hostPath: + path: /var + type: Directory + name: host-var + - hostPath: + path: /boot + type: Directory + name: host-boot + - hostPath: + path: /etc + type: Directory + name: host-etc + - hostPath: + path: /sys + type: Directory + name: sys + - name: certs + secret: + secretName: service-tls-certs + updateStrategy: + rollingUpdate: + maxUnavailable: 20 + type: RollingUpdate +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + backoffLimit: 1 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + spec: + containers: + - env: + - name: PL_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + name: provisioner + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: pl-cert-provisioner-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + +{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml new file mode 100644 index 00000000000..99ce3411b98 --- /dev/null +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -0,0 +1,2358 @@ +{{if and (.Values.autopilot) (not .Values.useEtcdOperator)}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: 
ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: 
pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-ns-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - services + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - pods + - services + - persistentvolumes + - persistentvolumeclaims + - serviceaccounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + - pods/log + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - 
statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-conn-election + - metadata-election + resources: + - leases + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - px.dev + resources: + - viziers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - px.dev + resources: + - viziers/status + verbs: + - get + - list + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-vizier-crd-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - px.dev + resources: + - viziers + - viziers/status + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resourceNames: + - metadata-election + resources: + - leases + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + creationTimestamp: null + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring 
+ component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-role + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - 
watch +- apiGroups: + - "" + resourceNames: + - kube-system + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + - pods + - services + - endpoints + - namespaces + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - watch + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +rules: +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - namespaces + verbs: + - watch + - get + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - watch + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: Role + name: pl-cert-provisioner-role +subjects: +- kind: ServiceAccount + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-cloud-connector-ns-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-updater-role +subjects: 
+- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-crd-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-metadata-role +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-query-broker-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element 
:= split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-query-broker-crd-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-vizier-crd-role +subjects: +- kind: ServiceAccount + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-cloud-connector-role +subjects: +- kind: ServiceAccount + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" 
$element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: default + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-updater-cluster-role +subjects: +- kind: ServiceAccount + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + 
labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-vizier-metadata +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-vizier-metadata-node-view-cluster-binding + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-node-view +subjects: +- kind: ServiceAccount + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} 
+ {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cloud-connector-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +data: + PL_CLIENT_TLS_CERT: /certs/client.crt + PL_CLIENT_TLS_KEY: /certs/client.key + PL_SERVER_TLS_CERT: /certs/server.crt + PL_SERVER_TLS_KEY: /certs/server.key + PL_TLS_CA_CERT: /certs/ca.crt +kind: ConfigMap +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: pl-tls-config + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin-service + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + clusterIP: None + ports: + - name: 
tcp-http2 + port: 59300 + protocol: TCP + targetPort: 59300 + selector: + app: pl-monitoring + component: vizier + name: kelvin + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50800 + protocol: TCP + targetPort: 50800 + selector: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50400 + protocol: TCP + targetPort: 50400 + selector: + app: pl-monitoring + component: vizier + name: vizier-metadata + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker-svc + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + ports: + - name: tcp-http2 + port: 50300 + protocol: TCP + targetPort: 50300 + - name: tcp-grpc-web + port: 50305 + protocol: TCP + targetPort: 50305 + selector: + app: pl-monitoring + component: vizier + name: vizier-query-broker + type: ClusterIP +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-pv-claim + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 16Gi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- 
end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: kelvin + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: kelvin + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - name: PL_HOST_PATH + value: /host + - name: PL_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true 
+ - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + name: app + ports: + - containerPort: 59300 + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + - mountPath: /sys + name: sys + readOnly: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - hostPath: + path: /sys + type: Directory + name: sys +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: vizier-cloud-connector + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + vizier-bootstrap: "true" + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element 
:= split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-cloud-connector + plane: control + vizier-bootstrap: "true" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_DEPLOY_KEY + valueFrom: + secretKeyRef: + key: deploy-key + name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} + optional: true + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cloud-connector-tls-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ 
.Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50800 + scheme: HTTPS + name: app + ports: + - containerPort: 50800 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: nats-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: cloud-conn-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-query-broker + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50300" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-query-broker + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_CLUSTER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + - name: PL_SENTRY_DSN + valueFrom: + secretKeyRef: + key: sentry-dsn + name: pl-cluster-secrets + optional: true + - 
name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_CLOUD_ADDR + valueFrom: + configMapKeyRef: + key: PL_CLOUD_ADDR + name: pl-cloud-config + - name: PL_DATA_ACCESS + value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50300 + scheme: HTTPS + name: app + ports: + - containerPort: 50300 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl + -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting + for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-cloud-connector-svc + - name: SERVICE_PORT + value: "50800" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: cc-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl 
-m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-metadata-svc + - name: SERVICE_PORT + value: "50400" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: mds-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: query-broker-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - configMap: + name: proxy-envoy-config + name: envoy-yaml +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else 
}}pl{{ end }} +spec: + replicas: 1 + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-metadata + serviceName: vizier-metadata + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + px.dev/metrics_port: "50400" + px.dev/metrics_scrape: "true" + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-metadata + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - env: + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_MAX_EXPECTED_CLOCK_SKEW + value: "2000" + - name: PL_RENEW_PERIOD + value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} + - name: PL_ETCD_OPERATOR_ENABLED + value: "false" + envFrom: + - configMapRef: + name: pl-tls-config + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + livenessProbe: + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 120 + periodSeconds: 10 + name: app + 
readinessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 50400 + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /certs + name: certs + - mountPath: /metadata + name: metadata-volume + initContainers: + - command: + - sh + - -c + - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; + until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 + ]; do echo "waiting for ${URL}"; sleep 2; done; + env: + - name: SERVICE_NAME + value: pl-nats-mgmt + - name: SERVICE_PORT + value: "8222" + - name: HEALTH_PATH + value: "" + - name: PROTOCOL + value: http + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: nats-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: metadata-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + volumes: + - name: certs + secret: + secretName: service-tls-certs + - name: metadata-volume + persistentVolumeClaim: + claimName: metadata-pv-claim + updateStrategy: + type: RollingUpdate +--- 
+apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + selector: + matchLabels: + app: pl-monitoring + component: vizier + name: vizier-pem + template: + metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: vizier-pem + plane: data + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + containers: + - args: [] + env: + - name: PL_PEM_ENV_VAR_PLACEHOLDER + value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. 
+ {{- range $key, $value := .Values.customPEMFlags}} + - name: {{$key}} + value: "{{$value}}" + {{- end}} + {{- if .Values.datastreamBufferSpikeSize }} + - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE + value: "{{ .Values.datastreamBufferSpikeSize }}" + {{- end}} + {{- if .Values.datastreamBufferSize }} + - name: PL_DATASTREAM_BUFFER_SIZE + value: "{{ .Values.datastreamBufferSize }}" + {{- end}} + - name: TCMALLOC_SAMPLE_PARAMETER + value: "1048576" + - name: PL_CLIENT_TLS_CERT + value: /certs/client.crt + - name: PL_CLIENT_TLS_KEY + value: /certs/client.key + - name: PL_TLS_CA_CERT + value: /certs/ca.crt + - name: PL_DISABLE_SSL + value: "false" + - name: PL_HOST_PATH + value: /host + - name: PL_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: PL_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PL_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PL_JWT_SIGNING_KEY + valueFrom: + secretKeyRef: + key: jwt-signing-key + name: pl-cluster-secrets + - name: PL_VIZIER_ID + valueFrom: + secretKeyRef: + key: cluster-id + name: pl-cluster-secrets + optional: true + - name: PL_VIZIER_NAME + valueFrom: + secretKeyRef: + key: cluster-name + name: pl-cluster-secrets + optional: true + - name: PL_CLOCK_CONVERTER + value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + name: pem + resources: + limits: + memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} + requests: + memory: {{ if .Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} + securityContext: + capabilities: + add: + - SYS_PTRACE + - SYS_ADMIN + privileged: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /host/lib + name: host-lib + 
readOnly: true + - mountPath: /host/var + name: host-var + readOnly: true + - mountPath: /host/boot + name: host-boot + readOnly: true + - mountPath: /host/etc + name: host-etc + readOnly: true + - mountPath: /sys + name: sys + readOnly: true + - mountPath: /certs + name: certs + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + initContainers: + - command: + - sh + - -c + - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ + $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do + echo "waiting for ${URL}"; sleep 2; done; ' + env: + - name: SERVICE_NAME + value: vizier-query-broker-svc + - name: SERVICE_PORT + value: "50300" + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' + name: qb-wait + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /lib + type: Directory + name: host-lib + - hostPath: + path: /var + type: Directory + name: host-var + - hostPath: + path: /boot + type: Directory + name: host-boot + - hostPath: + path: /etc + type: Directory + name: host-etc + - hostPath: + path: /sys + type: Directory + name: sys + - name: certs + secret: + secretName: service-tls-certs + updateStrategy: + rollingUpdate: + maxUnavailable: 20 + type: RollingUpdate +--- +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +spec: + backoffLimit: 1 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cert-provisioner-job + spec: + containers: + - env: + - name: PL_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + envFrom: + - configMapRef: + name: pl-cloud-config + - configMapRef: + name: pl-cluster-config + optional: true + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + name: provisioner + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + restartPolicy: Never + securityContext: + fsGroup: 10100 + runAsGroup: 10100 + runAsNonRoot: true + runAsUser: 10100 + seccompProfile: + type: RuntimeDefault + serviceAccountName: pl-cert-provisioner-service-account + tolerations: + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: amd64 + - effect: NoSchedule + key: kubernetes.io/arch + operator: Equal + value: arm64 + - effect: NoExecute + key: kubernetes.io/arch + operator: Equal + value: arm64 + +{{- end}} \ No newline at end of file diff --git 
a/vizier-chart/templates/image-replace.sh b/vizier-chart/templates/image-replace.sh new file mode 100755 index 00000000000..b8a647f35e5 --- /dev/null +++ b/vizier-chart/templates/image-replace.sh @@ -0,0 +1,63 @@ +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +00_secrets.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +01_nats.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +02_etcd.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ 
+03_vizier_etcd.yaml + + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +04_vizier_persistent.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +05_vizier_etcd_ap.yaml + +sed -i '' \ +-e 
's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +06_vizier_persistent_ap.yaml \ No newline at end of file diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml new file mode 100644 index 00000000000..7676b4be8d8 --- /dev/null +++ b/vizier-chart/values.yaml @@ -0,0 +1,5 @@ +deployKey: +clusterName: honeypixie +devCloudNamespace: plc +namespace: pl +imageTag: 2025-05-07_08-37-30.237_UTC \ No newline at end of file From 4a94d6065950ca067d54639773431e6b98d9edf1 Mon Sep 17 00:00:00 2001 From: entlein Date: Wed, 7 May 2025 20:20:00 +0200 Subject: [PATCH 29/86] more replacements Signed-off-by: entlein --- vizier-chart/templates/03_vizier_etcd.yaml | 12 ++-- .../templates/04_vizier_persistent.yaml | 12 ++-- vizier-chart/templates/05_vizier_etcd_ap.yaml | 12 ++-- .../templates/06_vizier_persistent_ap.yaml | 12 ++-- vizier-chart/templates/image-replace.sh | 67 +++++++++++++++++++ 5 files changed, 91 insertions(+), 24 deletions(-) diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml index 47eeb6bb6e5..cda9fa2a789 100644 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ 
b/vizier-chart/templates/03_vizier_etcd.yaml @@ -1363,7 +1363,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1567,7 +1567,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1732,7 +1732,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1923,7 +1923,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + image: '{{ 
if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2142,7 +2142,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2264,7 +2264,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index 815d4c3c6d9..87ca06b8448 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1391,7 +1391,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ 
.Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1595,7 +1595,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1778,7 +1778,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1961,7 +1961,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2176,7 +2176,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if 
.Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2298,7 +2298,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml index 6d456aec391..55f4a473bc1 100644 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -1363,7 +1363,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1567,7 +1567,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1732,7 +1732,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1923,7 +1923,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2142,7 +2142,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2285,7 +2285,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml index 99ce3411b98..d0eb7e59127 100644 --- a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -1391,7 +1391,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1595,7 +1595,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag 
}}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1778,7 +1778,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1961,7 +1961,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2176,7 +2176,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2319,7 +2319,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/image-replace.sh b/vizier-chart/templates/image-replace.sh index b8a647f35e5..2d1dd4f9746 100755 --- a/vizier-chart/templates/image-replace.sh +++ b/vizier-chart/templates/image-replace.sh @@ -60,4 +60,71 @@ sed -i '' \ -e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ -e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ -e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +06_vizier_persistent_ap.yaml + + + + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 
's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +00_secrets.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +01_nats.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ 
+02_etcd.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +03_vizier_etcd.yaml + + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +04_vizier_persistent.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ 
.Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +05_vizier_etcd_ap.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ 06_vizier_persistent_ap.yaml \ No newline at end of file From 6b325cb54a5e350d108e967b673d23a2fd880adc Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 10:13:02 +0200 Subject: [PATCH 30/86] chore: amend text to resolve PR comments: highlight that minikube is 
optional Signed-off-by: entlein --- DEVELOPMENT.md | 61 +++++++++++++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index c5c918fe747..9a823c5845c 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -14,23 +14,27 @@ Decide first if you'd like a full buildsystem (on a VM) or a containerized dev e ### VM as buildsystem -This utilizes `chef` to setup all dependencies and is based on `ubuntu`. The VM type must support nested virtualization for `minikube` to work. +This utilizes `chef` to setup all dependencies and is based on `ubuntu`. +> [!Important] +> The below description defaults to using a `minikube` on this VM for the developer to have an `all-in-one` setup. The VM type must support nested virtualization for `minikube` to work. Please confirm that the nested virtualization really is turned on before you continue, not all VM-types support it. +> If you `bring-your-own-k8s`, you may disregard this. +```yaml +advancedMachineFeatures: + enableNestedVirtualization: true +``` The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025): The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convienent and overall `n2-standard-16` works well. > [!Warning] -> The first build takes several hours and at least 160 Gb of space -> Turn on nested virtualization during provisioning and avoid the use of `spot` VMs for the first build to avoid the very long first build interrupting. If you create the VMs as templates from an image, you can later switch to more cost-effective `spot` instances. +> The first `full build` takes several hours and at least 160 Gb of space +> The first `vizier build` on these parameters takes approx. 1 hr and 45 Gb of space. 
-```yaml -advancedMachineFeatures: - enableNestedVirtualization: true -``` -1) Install chef and some dependencies + +#### 1) Install chef and some dependencies First, install `chef` to cook your `recipies`: @@ -39,11 +43,11 @@ curl -L https://chefdownload-community.chef.io/install.sh | sudo bash ``` You may find it helpful to use a terminal manager like `screen` or `tmux`, esp to detach the builds. ```bash -sudo apt install -y screen +sudo apt install -y screen git ``` In order to very significantly speed up your work, you may opt for a local cache directory. This can be shared between users of the VM, if both are part of the same group. -Create a cache dir under like /tmp/bazel +Create a cache dir under such as e.g. /tmp/bazel ```sh sudo groupadd bazelcache sudo usermod -aG bazelcache $USER @@ -68,7 +72,7 @@ echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc ``` -2) If using Cache, tell bazel about it +#### 2) If using cache, tell bazel about it Edit the `` into the .bazelrc and put it into your homedir: @@ -84,20 +88,26 @@ Edit the `` into the .bazelrc and put it into your homedir: cp .bazelrc ~/. ``` -3) Create/Use a registry you control and login +#### 3) Create/Use a registry you control and login ```sh docker login ghcr.io/ ``` -4) Make Minikube run and deploy a vanilla pixie +#### 4) Prepare your kubernetes + +> [!Important] +> The below description defaults to using a `minikube` on this VM for the developer to have an `all-in-one` setup. +> If you `bring-your-own-k8s`, please prepare your preferred setup and go to Step 5 If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), starting the development environment on this VM will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login). 
The following command will, amongst other things, start minikube ```sh make dev-env-start ``` -Onto this minikube, we first deploy the upstream pixie (`vizier`, `kelvin` and `pem`) using the remote cloud `export PX_CLOUD_ADDR=getcosmic.ai` . Follow https://docs.px.dev/installing-pixie/install-schemes/cli , to install the `px` command line interface and login: +#### 5) Deploy a vanilla pixie + +We first deploy the upstream pixie (`vizier`, `kelvin` and `pem`) using the remote cloud `export PX_CLOUD_ADDR=getcosmic.ai` . Follow https://docs.px.dev/installing-pixie/install-schemes/cli , to install the `px` command line interface and login: ```sh px auth login ``` @@ -113,23 +123,28 @@ You may encounter the following WARNING, which is related to the kernel headers ERR: Detected missing kernel headers on your cluster's nodes. This may cause issues with the Pixie agent. Please install kernel headers on all nodes. ``` -5) Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) +#### 6) Skaffold deploy your changes + +Once you make changes to the source code, or switch to another source code version, use Skaffold to deploy (after you have the vanilla setup working on minikube) + +Ensure that you have commented in the bazelcache-directory into the bazel config (see Step 2). 
-Ensure that you have commented in the bazelcache-directory into the bazel config (see Step 2) - -Check that your docker login token is still valid, then +Optional: you can make permanent your in the skaffold config: ```sh -> skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=ghcr.io/ +skaffold config set default-repo +skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot ``` + +Check that your docker login token is still valid, then -Optional: you can set default-repo on config, so that you don't need to pass it as an argument everytime ```sh -> skaffold config set default-repo ghcr.io/ -> skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot +skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo= ``` -6) Golden image + + +#### 7) Skaffold deploy your changes Once all the above is working and the first cache has been built, bake an image of your VM for safekeeping. From e078ae784ad6aef6a60f56cdef755936d991da34 Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 10:19:32 +0200 Subject: [PATCH 31/86] chore: revert skaffold_visizer but add comments Signed-off-by: entlein --- skaffold/skaffold_vizier.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/skaffold/skaffold_vizier.yaml b/skaffold/skaffold_vizier.yaml index a3f61e086ad..5c7c44336d0 100644 --- a/skaffold/skaffold_vizier.yaml +++ b/skaffold/skaffold_vizier.yaml @@ -138,10 +138,15 @@ profiles: path: /manifests/kustomize/paths value: - k8s/vizier/persistent_metadata/aarch64 +# Note: You will want to stick with a sysroot based build (-p x86_64_sysroot or -p aarch64_sysroot), +# but you may want to change the --complication_mode setting based on your needs. +# opt builds remove assert/debug checks, while dbg builds work with debuggers (gdb). 
+# See the bazel docs for more details https://bazel.build/docs/user-manual#compilation-mode - name: x86_64_sysroot patches: - op: add path: /build/artifacts/context=./bazel/args value: - --config=x86_64_sysroot - - --compilation_mode=opt + - --compilation_mode=dbg +# - --compilation_mode=opt From 94dd78b75daab97ee7d193db16b1d4fdaca1a48d Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 10:20:21 +0200 Subject: [PATCH 32/86] chore: amend text to resolve PR comments: highlight that minikube is optional Signed-off-by: entlein --- DEVELOPMENT.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 9a823c5845c..6734b945155 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -118,6 +118,8 @@ px deploy -p=1Gi ``` For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud. +Optional on `minikube`: + You may encounter the following WARNING, which is related to the kernel headers missing on the minikube node (this is not your VM node). Usually, for development purposes this is safe to ignore. Please see [pixie-issue2051](https://github.com/pixie-io/pixie/issues/2051) for further details. ``` ERR: Detected missing kernel headers on your cluster's nodes. This may cause issues with the Pixie agent. Please install kernel headers on all nodes. 
From 29b5314c56e8dd68162a9cc393edab65614dd9b9 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Thu, 8 May 2025 10:21:24 +0200 Subject: [PATCH 33/86] Update tools/chef/cookbooks/px_dev/recipes/linux.rb Co-authored-by: Dom Delnano Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- tools/chef/cookbooks/px_dev/recipes/linux.rb | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/chef/cookbooks/px_dev/recipes/linux.rb b/tools/chef/cookbooks/px_dev/recipes/linux.rb index 4371576ea9d..eb919f3216e 100644 --- a/tools/chef/cookbooks/px_dev/recipes/linux.rb +++ b/tools/chef/cookbooks/px_dev/recipes/linux.rb @@ -66,7 +66,6 @@ # Pixie dependencies 'mkcert', - #'coreutils' not sure about that one, need to test ] apt_package apt_pkg_list do From e3588fab56b778eadd8fb86ae03cf4add629cded Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Thu, 8 May 2025 10:21:52 +0200 Subject: [PATCH 34/86] Update DEVELOPMENT.md Co-authored-by: Dom Delnano Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 6734b945155..d1c18217a33 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -24,7 +24,7 @@ advancedMachineFeatures: enableNestedVirtualization: true ``` -The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025): The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convienent and overall `n2-standard-16` works well. +The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025). Please see the latest [packer file](https://github.com/pixie-io/pixie/blob/main/tools/chef/Makefile#L56) for the current supported Ubuntu version: The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convenient and overall `n2-standard-16` works well. 
> [!Warning] > The first `full build` takes several hours and at least 160 Gb of space From c616e6601ce99521cf20212d6954d2cde86726b7 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Thu, 8 May 2025 10:22:05 +0200 Subject: [PATCH 35/86] Update DEVELOPMENT.md Co-authored-by: Dom Delnano Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- DEVELOPMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index d1c18217a33..47a4f986f02 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -120,7 +120,7 @@ For reference and further information https://docs.px.dev/installing-pixie/insta Optional on `minikube`: -You may encounter the following WARNING, which is related to the kernel headers missing on the minikube node (this is not your VM node). Usually, for development purposes this is safe to ignore. Please see [pixie-issue2051](https://github.com/pixie-io/pixie/issues/2051) for further details. +You may encounter the following WARNING, which is related to the kernel headers missing on the minikube node (this is not your VM node). This is safe to ignore if Pixie starts up properly and your cluster is queryable from Pixie's [Live UI](https://docs.px.dev/using-pixie/using-live-ui). Please see [pixie-issue2051](https://github.com/pixie-io/pixie/issues/2051) for further details. ``` ERR: Detected missing kernel headers on your cluster's nodes. This may cause issues with the Pixie agent. Please install kernel headers on all nodes. 
``` From c846a4d96ad8650e65c215602b773d39fa5ba556 Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 10:23:26 +0200 Subject: [PATCH 36/86] chore: apply Dom s suggested edit Signed-off-by: entlein --- DEVELOPMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 47a4f986f02..243edc4e681 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -107,7 +107,7 @@ make dev-env-start #### 5) Deploy a vanilla pixie -We first deploy the upstream pixie (`vizier`, `kelvin` and `pem`) using the remote cloud `export PX_CLOUD_ADDR=getcosmic.ai` . Follow https://docs.px.dev/installing-pixie/install-schemes/cli , to install the `px` command line interface and login: +First deploy the upstream pixie (`vizier`, `kelvin` and `pem`) using the hosted cloud. Follow [these instructions](https://docs.px.dev/installing-pixie/install-schemes/cli) to install the `px` command line interface and Pixie: ```sh px auth login ``` From 11e0519c9571b096f369124c50d94d562e157a2f Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 10:31:23 +0200 Subject: [PATCH 37/86] chore: adding the compilation mode explanation to the docs Signed-off-by: entlein --- DEVELOPMENT.md | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 243edc4e681..a507e6e6570 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -132,6 +132,23 @@ Once you make changes to the source code, or switch to another source code versi Ensure that you have commented in the bazelcache-directory into the bazel config (see Step 2). +Review the compilation-mode suits your purposes: +``` +cat skaffold/skaffold_vizier.yaml +# Note: You will want to stick with a sysroot based build (-p x86_64_sysroot or -p aarch64_sysroot), +# but you may want to change the --complication_mode setting based on your needs. +# opt builds remove assert/debug checks, while dbg builds work with debuggers (gdb). 
+# See the bazel docs for more details https://bazel.build/docs/user-manual#compilation-mode +- name: x86_64_sysroot + patches: + - op: add + path: /build/artifacts/context=./bazel/args + value: + - --config=x86_64_sysroot + - --compilation_mode=dbg +# - --compilation_mode=opt +``` + Optional: you can make permanent your in the skaffold config: ```sh skaffold config set default-repo @@ -146,7 +163,7 @@ skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot --default-repo=< -#### 7) Skaffold deploy your changes +#### 7) Golden Image Once all the above is working and the first cache has been built, bake an image of your VM for safekeeping. From c124e0d39609651111869697156c74ac48c023e6 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Thu, 8 May 2025 15:49:50 +0200 Subject: [PATCH 38/86] Remove trailing whitespace linux.rb linter Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- tools/chef/cookbooks/px_dev/recipes/linux.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/chef/cookbooks/px_dev/recipes/linux.rb b/tools/chef/cookbooks/px_dev/recipes/linux.rb index eb919f3216e..aea415ac1e9 100644 --- a/tools/chef/cookbooks/px_dev/recipes/linux.rb +++ b/tools/chef/cookbooks/px_dev/recipes/linux.rb @@ -61,7 +61,7 @@ 'libnss3-tools', 'libvirt-daemon-system', 'libvirt-clients', - 'qemu-kvm', + 'qemu-kvm', 'virt-manager', # Pixie dependencies From a4674d75fae8664a41a1f4f2ab1c48ae221922b1 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Thu, 8 May 2025 15:56:33 +0200 Subject: [PATCH 39/86] Remove trailing whitespace from skaffold_vizier.yaml linter Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- skaffold/skaffold_vizier.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/skaffold/skaffold_vizier.yaml b/skaffold/skaffold_vizier.yaml index 5c7c44336d0..14765d6da6f 100644 --- a/skaffold/skaffold_vizier.yaml +++ 
b/skaffold/skaffold_vizier.yaml @@ -139,7 +139,7 @@ profiles: value: - k8s/vizier/persistent_metadata/aarch64 # Note: You will want to stick with a sysroot based build (-p x86_64_sysroot or -p aarch64_sysroot), -# but you may want to change the --complication_mode setting based on your needs. +# but you may want to change the --complication_mode setting based on your needs. # opt builds remove assert/debug checks, while dbg builds work with debuggers (gdb). # See the bazel docs for more details https://bazel.build/docs/user-manual#compilation-mode - name: x86_64_sysroot From 5b407fff64eab836af025bd1aceaeee313a02447 Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 18:05:31 +0200 Subject: [PATCH 40/86] linting: removing lots of whitespaces at the EOL Signed-off-by: entlein --- DEVELOPMENT.md | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index a507e6e6570..1c313d0b9e3 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -14,7 +14,7 @@ Decide first if you'd like a full buildsystem (on a VM) or a containerized dev e ### VM as buildsystem -This utilizes `chef` to setup all dependencies and is based on `ubuntu`. +This utilizes `chef` to setup all dependencies and is based on `ubuntu`. > [!Important] > The below description defaults to using a `minikube` on this VM for the developer to have an `all-in-one` setup. The VM type must support nested virtualization for `minikube` to work. Please confirm that the nested virtualization really is turned on before you continue, not all VM-types support it. > If you `bring-your-own-k8s`, you may disregard this. @@ -24,7 +24,7 @@ advancedMachineFeatures: enableNestedVirtualization: true ``` -The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025). 
Please see the latest [packer file](https://github.com/pixie-io/pixie/blob/main/tools/chef/Makefile#L56) for the current supported Ubuntu version: The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convenient and overall `n2-standard-16` works well. +The following specifics were tested on GCP on a Ubuntu 24.04 (May 2025). Please see the latest [packer file](https://github.com/pixie-io/pixie/blob/main/tools/chef/Makefile#L56) for the current supported Ubuntu version: The initial compilation is CPU intense and `16vcpu` were a good trade-off, a balanced disk of 500 GB seems convenient and overall `n2-standard-16` works well. > [!Warning] > The first `full build` takes several hours and at least 160 Gb of space @@ -74,7 +74,6 @@ echo "source /opt/px_dev/pxenv.inc " >> ~/.bashrc #### 2) If using cache, tell bazel about it - Edit the `` into the .bazelrc and put it into your homedir: ``` # Global bazelrc file, see https://docs.bazel.build/versions/master/guide.html#bazelrc. @@ -89,15 +88,15 @@ cp .bazelrc ~/. ``` #### 3) Create/Use a registry you control and login - + ```sh docker login ghcr.io/ ``` -#### 4) Prepare your kubernetes +#### 4) Prepare your kubernetes > [!Important] -> The below description defaults to using a `minikube` on this VM for the developer to have an `all-in-one` setup. +> The below description defaults to using a `minikube` on this VM for the developer to have an `all-in-one` setup. > If you `bring-your-own-k8s`, please prepare your preferred setup and go to Step 5 If you added your user to the libvirt group (`sudo usermod -aG libvirt $USER`), starting the development environment on this VM will now work (if you did this interactively: you need to refresh your group membership, e.g. by logout/login). 
The following command will, amongst other things, start minikube @@ -118,7 +117,7 @@ px deploy -p=1Gi ``` For reference and further information https://docs.px.dev/installing-pixie/install-guides/hosted-pixie/cosmic-cloud. -Optional on `minikube`: +Optional on `minikube`: You may encounter the following WARNING, which is related to the kernel headers missing on the minikube node (this is not your VM node). This is safe to ignore if Pixie starts up properly and your cluster is queryable from Pixie's [Live UI](https://docs.px.dev/using-pixie/using-live-ui). Please see [pixie-issue2051](https://github.com/pixie-io/pixie/issues/2051) for further details. ``` @@ -136,7 +135,7 @@ Review the compilation-mode suits your purposes: ``` cat skaffold/skaffold_vizier.yaml # Note: You will want to stick with a sysroot based build (-p x86_64_sysroot or -p aarch64_sysroot), -# but you may want to change the --complication_mode setting based on your needs. +# but you may want to change the --complication_mode setting based on your needs. # opt builds remove assert/debug checks, while dbg builds work with debuggers (gdb). # See the bazel docs for more details https://bazel.build/docs/user-manual#compilation-mode - name: x86_64_sysroot @@ -151,10 +150,10 @@ cat skaffold/skaffold_vizier.yaml Optional: you can make permanent your in the skaffold config: ```sh -skaffold config set default-repo +skaffold config set default-repo skaffold run -f skaffold/skaffold_vizier.yaml -p x86_64_sysroot ``` - + Check that your docker login token is still valid, then ```sh @@ -298,6 +297,4 @@ export PL_TESTING_ENV=dev You will be able to run any of the CLI commands using `bazel run`. 
- `bazel run //src/pixie_cli:px -- deploy` will be equivalent to `px deploy` -- `bazel run //src/pixie_cli:px -- run px/cluster` is the same as `px run px/cluster` - - +- `bazel run //src/pixie_cli:px -- run px/cluster` is the same as `px run px/cluster` \ No newline at end of file From fd7d4eafe75e326be3af59ba3122136586205776 Mon Sep 17 00:00:00 2001 From: entlein Date: Thu, 8 May 2025 18:43:19 +0200 Subject: [PATCH 41/86] linting: file must end on a newline Signed-off-by: entlein --- DEVELOPMENT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 1c313d0b9e3..2629496c5e1 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -297,4 +297,4 @@ export PL_TESTING_ENV=dev You will be able to run any of the CLI commands using `bazel run`. - `bazel run //src/pixie_cli:px -- deploy` will be equivalent to `px deploy` -- `bazel run //src/pixie_cli:px -- run px/cluster` is the same as `px run px/cluster` \ No newline at end of file +- `bazel run //src/pixie_cli:px -- run px/cluster` is the same as `px run px/cluster` From dfb198b5bbfbc32a8678b2595f93607956076bdb Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Fri, 9 May 2025 09:19:53 -0700 Subject: [PATCH 42/86] Merge pipeline-support-wip Signed-off-by: Dom Del Nano --- .arclint | 1 + .bazelrc | 3 + BUILD.bazel | 17 + WORKSPACE | 2 +- demos/log-generator/log-generator.yaml | 89 + skaffold/skaffold_vizier.yaml | 18 +- .../standalone_pem_example/example.go | 66 +- src/api/go/pxapi/vizier.go | 2 + src/carnot/carnot.cc | 4 +- src/carnot/carnot_test.cc | 2 +- src/carnot/exec/BUILD.bazel | 2 + src/carnot/exec/exec_graph_test.cc | 245 +- src/carnot/exec/exec_node.h | 121 +- src/carnot/exec/exec_state.h | 12 +- src/carnot/exec/grpc_sink_node_benchmark.cc | 3 +- src/carnot/exec/grpc_sink_node_test.cc | 22 +- src/carnot/exec/memory_sink_node.cc | 3 +- src/carnot/exec/memory_sink_node_test.cc | 6 +- src/carnot/exec/memory_source_node.cc | 6 +- src/carnot/exec/memory_source_node.h | 
2 +- src/carnot/exec/memory_source_node_test.cc | 12 +- src/carnot/exec/otel_export_sink_node.cc | 3 +- src/carnot/exec/otel_export_sink_node_test.cc | 39 +- src/carnot/exec/test_utils.h | 10 +- src/carnot/funcs/builtins/builtins.cc | 2 + src/carnot/funcs/builtins/pipeline_ops.cc | 39 + src/carnot/funcs/builtins/pipeline_ops.h | 83 + src/carnot/plan/operators.cc | 24 +- src/carnot/plan/operators.h | 41 +- src/carnot/planner/cgo_export.cc | 16 +- src/carnot/planner/cgo_export_test.cc | 37 + src/carnot/planner/compiler/BUILD.bazel | 1 + src/carnot/planner/compiler/ast_visitor.cc | 2 + src/carnot/planner/compiler/ast_visitor.h | 1 + .../planner/compiler/graph_comparison.h | 2 +- src/carnot/planner/compiler/test_utils.h | 8 + .../distributed/coordinator/coordinator.cc | 7 + .../coordinator/coordinator_test.cc | 81 +- .../prune_unavailable_sources_rule.cc | 3 +- .../distributed_plan/distributed_plan.cc | 4 + .../distributed/distributed_planner_test.cc | 72 + .../distributed_stitcher_rules_test.cc | 62 +- .../planner/distributed/splitter/splitter.h | 2 +- src/carnot/planner/file_source/BUILD.bazel | 52 + src/carnot/planner/file_source/file_source.cc | 27 + src/carnot/planner/file_source/file_source.h | 37 + .../planner/file_source/file_source_test.cc | 91 + src/carnot/planner/file_source/ir/BUILD.bazel | 41 + .../planner/file_source/ir/logical.pb.go | 567 + .../planner/file_source/ir/logical.proto | 39 + src/carnot/planner/file_source/log_module.cc | 104 + src/carnot/planner/file_source/log_module.h | 69 + src/carnot/planner/ir/grpc_sink_ir.cc | 3 + src/carnot/planner/ir/grpc_sink_ir.h | 16 +- src/carnot/planner/ir/ir.h | 20 +- src/carnot/planner/ir/memory_sink_ir.cc | 2 + src/carnot/planner/ir/memory_sink_ir.h | 5 +- src/carnot/planner/ir/memory_source_ir.cc | 1 + src/carnot/planner/ir/memory_source_ir.h | 4 +- src/carnot/planner/ir/operator_ir.h | 34 + src/carnot/planner/ir/otel_export_sink_ir.cc | 71 + src/carnot/planner/ir/otel_export_sink_ir.h | 17 +- 
.../planner/ir/otel_export_sink_ir_test.cc | 133 + src/carnot/planner/logical_planner_test.cc | 148 +- src/carnot/planner/objects/BUILD.bazel | 1 + src/carnot/planner/objects/dataframe.cc | 56 +- src/carnot/planner/objects/dataframe.h | 23 +- src/carnot/planner/objects/otel.cc | 80 +- src/carnot/planner/objects/otel.h | 52 +- src/carnot/planner/objects/otel_test.cc | 97 + src/carnot/planner/objects/qlobject.h | 1 + src/carnot/planner/plannerpb/BUILD.bazel | 3 + src/carnot/planner/plannerpb/service.pb.go | 617 +- src/carnot/planner/plannerpb/service.proto | 10 + src/carnot/planner/probes/BUILD.bazel | 1 + src/carnot/planner/probes/probes.cc | 28 + src/carnot/planner/probes/probes.h | 35 + .../planner/probes/tracepoint_generator.cc | 18 +- .../planner/probes/tracepoint_generator.h | 6 + src/carnot/planner/test_utils.h | 312 + src/carnot/planpb/plan.pb.go | 616 +- src/carnot/planpb/plan.proto | 2 + src/carnot/planpb/test_proto.h | 66 + src/common/json/json.h | 21 + src/common/testing/protobuf.h | 4 +- src/common/uuid/uuid_utils.h | 4 + .../standalone_pem/file_source_manager.cc | 195 + .../standalone_pem/file_source_manager.h | 71 + .../standalone_pem/standalone_pem_manager.cc | 34 +- .../standalone_pem/standalone_pem_manager.h | 4 + .../standalone_pem/tracepoint_manager.cc | 5 +- .../standalone_pem/vizier_server.h | 46 +- .../px/pipeline_flow_graph/manifest.yaml | 4 + .../pipeline_flow_graph.pxl | 82 + .../px/pipeline_flow_graph/vis.json | 49 + src/shared/metadata/metadata_state.cc | 2 +- src/shared/metadata/metadata_state.h | 5 +- .../metadata/standalone_state_manager.h | 4 +- src/shared/metadata/state_manager.h | 4 +- src/shared/schema/utils.cc | 12 +- src/shared/schema/utils.h | 7 +- src/stirling/BUILD.bazel | 1 + src/stirling/core/BUILD.bazel | 1 + src/stirling/core/info_class_manager.cc | 6 +- src/stirling/core/info_class_manager.h | 10 + src/stirling/core/info_class_manager_test.cc | 1 + src/stirling/core/source_connector.cc | 2 +- 
src/stirling/proto/stirling.proto | 1 + .../source_connectors/file_source/BUILD.bazel | 60 + .../file_source/file_source_connector.cc | 287 + .../file_source/file_source_connector.h | 87 + .../file_source/file_source_connector_test.cc | 82 + .../file_source/stirling_fs_test.cc | 225 + .../file_source/testdata/kern.log | 5 + .../file_source/testdata/test.json | 10 + .../file_source/testdata/unsupported.json | 1 + .../stirling_error/BUILD.bazel | 3 +- .../stirling_error/sink_results_table.h | 51 + .../stirling_error/stirling_error_bpf_test.cc | 92 + .../stirling_error_connector.cc | 19 +- .../stirling_error/stirling_error_connector.h | 7 +- .../stirling_error/stream_status_table.h | 51 + .../stirling_error/testdata/test.json | 10 + .../stirling_error/testdata/unsupported.json | 1 + src/stirling/stirling.cc | 186 +- src/stirling/stirling.h | 4 + src/stirling/testing/common.h | 2 +- src/stirling/testing/overloads.h | 10 + src/stirling/testing/stirling_mock.h | 5 + src/stirling/utils/monitor.cc | 11 + src/stirling/utils/monitor.h | 14 + src/table_store/schema/relation.cc | 14 + src/table_store/schema/relation.h | 4 + src/table_store/schemapb/schema.pb.go | 165 +- src/table_store/schemapb/schema.proto | 2 + .../internal/store_with_row_accounting.h | 44 +- src/table_store/table/table.cc | 310 +- src/table_store/table/table.h | 571 +- src/table_store/table/table_benchmark.cc | 32 +- src/table_store/table/table_store.cc | 2 +- src/table_store/table/table_store_test.cc | 10 +- src/table_store/table/table_test.cc | 343 +- src/table_store/table/tablets_group.cc | 2 +- src/table_store/table/tablets_group_test.cc | 4 +- src/table_store/test_utils.h | 2 +- src/ui/src/utils/pxl.ts | 2 + src/vizier/funcs/context/vizier_context.h | 9 +- src/vizier/funcs/md_udtfs/md_udtfs.cc | 2 + src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 154 +- src/vizier/messages/messagespb/BUILD.bazel | 3 + src/vizier/messages/messagespb/messages.pb.go | 2070 ++- src/vizier/messages/messagespb/messages.proto | 
32 + .../services/agent/kelvin/kelvin_manager.h | 1 + .../services/agent/pem/file_source_manager.cc | 234 + .../services/agent/pem/file_source_manager.h | 73 + src/vizier/services/agent/pem/pem_manager.cc | 25 +- src/vizier/services/agent/pem/pem_manager.h | 2 + .../services/agent/pem/tracepoint_manager.cc | 6 +- .../services/agent/shared/manager/BUILD.bazel | 1 + .../agent/shared/manager/heartbeat.cc | 3 +- .../services/agent/shared/manager/heartbeat.h | 21 + .../agent/shared/manager/heartbeat_test.cc | 6 +- .../services/agent/shared/manager/manager.cc | 18 +- .../services/agent/shared/manager/manager.h | 2 + .../shared/manager/relation_info_manager.cc | 3 + .../manager/relation_info_manager_test.cc | 8 +- src/vizier/services/metadata/BUILD.bazel | 1 + .../services/metadata/controllers/BUILD.bazel | 3 + .../controllers/agent_topic_listener.go | 44 +- .../controllers/agent_topic_listener_test.go | 102 +- .../controllers/file_source/BUILD.bazel | 74 + .../controllers/file_source/file_source.go | 375 + .../file_source/file_source_store.go | 309 + .../file_source/file_source_store_test.go | 364 + .../file_source/file_source_test.go | 528 + .../metadata/controllers/file_source/mock.go | 21 + .../controllers/file_source/mock/BUILD.bazel | 29 + .../file_source/mock/mock_file_source.gen.go | 277 + .../metadata/controllers/message_bus.go | 12 +- .../services/metadata/controllers/server.go | 186 +- .../metadata/controllers/server_test.go | 55 +- .../services/metadata/metadata_server.go | 11 +- .../services/metadata/metadatapb/BUILD.bazel | 3 + .../metadata/metadatapb/service.pb.go | 10458 ++++++++++------ .../metadata/metadatapb/service.proto | 64 + .../services/metadata/storepb/BUILD.bazel | 3 + .../services/metadata/storepb/store.pb.go | 1177 +- .../services/metadata/storepb/store.proto | 25 + .../query_broker/controllers/BUILD.bazel | 1 + .../query_broker/controllers/errors.go | 4 + .../controllers/mutation_executor.go | 155 +- .../controllers/query_executor.go | 7 +- 
.../controllers/query_executor_test.go | 4 +- .../query_broker/controllers/server.go | 10 +- .../query_broker/controllers/server_test.go | 12 +- .../query_broker/query_broker_server.go | 3 +- .../script_runner/script_runner.go | 7 +- .../query_broker/tracker/agents_info.go | 6 +- .../services/shared/agentpb/agent.pb.go | 159 +- .../services/shared/agentpb/agent.proto | 1 + 200 files changed, 19736 insertions(+), 5414 deletions(-) create mode 100644 demos/log-generator/log-generator.yaml create mode 100644 src/carnot/funcs/builtins/pipeline_ops.cc create mode 100644 src/carnot/funcs/builtins/pipeline_ops.h create mode 100644 src/carnot/planner/file_source/BUILD.bazel create mode 100644 src/carnot/planner/file_source/file_source.cc create mode 100644 src/carnot/planner/file_source/file_source.h create mode 100644 src/carnot/planner/file_source/file_source_test.cc create mode 100644 src/carnot/planner/file_source/ir/BUILD.bazel create mode 100755 src/carnot/planner/file_source/ir/logical.pb.go create mode 100644 src/carnot/planner/file_source/ir/logical.proto create mode 100644 src/carnot/planner/file_source/log_module.cc create mode 100644 src/carnot/planner/file_source/log_module.h create mode 100644 src/experimental/standalone_pem/file_source_manager.cc create mode 100644 src/experimental/standalone_pem/file_source_manager.h create mode 100644 src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml create mode 100644 src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl create mode 100644 src/pxl_scripts/px/pipeline_flow_graph/vis.json create mode 100644 src/stirling/source_connectors/file_source/BUILD.bazel create mode 100644 src/stirling/source_connectors/file_source/file_source_connector.cc create mode 100644 src/stirling/source_connectors/file_source/file_source_connector.h create mode 100644 src/stirling/source_connectors/file_source/file_source_connector_test.cc create mode 100644 src/stirling/source_connectors/file_source/stirling_fs_test.cc create 
mode 100644 src/stirling/source_connectors/file_source/testdata/kern.log create mode 100644 src/stirling/source_connectors/file_source/testdata/test.json create mode 100644 src/stirling/source_connectors/file_source/testdata/unsupported.json create mode 100644 src/stirling/source_connectors/stirling_error/sink_results_table.h create mode 100644 src/stirling/source_connectors/stirling_error/stream_status_table.h create mode 100644 src/stirling/source_connectors/stirling_error/testdata/test.json create mode 100644 src/stirling/source_connectors/stirling_error/testdata/unsupported.json create mode 100644 src/vizier/services/agent/pem/file_source_manager.cc create mode 100644 src/vizier/services/agent/pem/file_source_manager.h create mode 100644 src/vizier/services/metadata/controllers/file_source/BUILD.bazel create mode 100644 src/vizier/services/metadata/controllers/file_source/file_source.go create mode 100644 src/vizier/services/metadata/controllers/file_source/file_source_store.go create mode 100644 src/vizier/services/metadata/controllers/file_source/file_source_store_test.go create mode 100644 src/vizier/services/metadata/controllers/file_source/file_source_test.go create mode 100644 src/vizier/services/metadata/controllers/file_source/mock.go create mode 100644 src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel create mode 100644 src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go diff --git a/.arclint b/.arclint index 560c165400b..54d7f829f54 100644 --- a/.arclint +++ b/.arclint @@ -23,6 +23,7 @@ "(^src/stirling/bpf_tools/bcc_bpf/system-headers)", "(^src/stirling/mysql/testing/.*\\.json$)", "(^src/stirling/obj_tools/testdata/go/test_go_binary.go)", + "(^src/stirling/source_connectors/file_source/testdata/test.json$)", "(^src/stirling/source_connectors/socket_tracer/protocols/http2/testing/go_grpc_client/main.go$)", "(^src/stirling/source_connectors/socket_tracer/protocols/http2/testing/go_grpc_server/main.go$)", 
"(^src/stirling/utils/testdata/config$)", diff --git a/.bazelrc b/.bazelrc index 86182129958..8d48f599d7b 100644 --- a/.bazelrc +++ b/.bazelrc @@ -3,6 +3,9 @@ # Use strict action env to prevent leaks of env vars. build --incompatible_strict_action_env +# Use cache +# build --disk_cache=/tmp/bazel/cache # must not be merged dev only settng + # Only pass through GH_API_KEY for stamped builds. # This is still not ideal as it still busts the cache of stamped builds. build:stamp --stamp diff --git a/BUILD.bazel b/BUILD.bazel index 177a71158a6..874f7e13e5e 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1,3 +1,6 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@px//bazel:pl_build_system.bzl", "pl_go_binary") + # Copyright 2018- The Pixie Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,6 +59,7 @@ gazelle( # gazelle:resolve go px.dev/pixie/src/carnot/docspb //src/carnot/docspb:docs_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/compilerpb //src/carnot/planner/compilerpb:compiler_status_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/distributedpb //src/carnot/planner/distributedpb:distributed_plan_pl_go_proto +# gazelle:resolve go px.dev/pixie/src/carnot/planner/file_source/ir //src/carnot/planner/file_source/ir:logical_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb //src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/plannerpb //src/carnot/planner/plannerpb:service_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planpb //src/carnot/planpb:plan_pl_go_proto @@ -216,3 +220,16 @@ filegroup( srcs = ["go.sum"], visibility = ["//visibility:public"], ) + +go_library( + name = "pixie_lib", + srcs = ["gosym_tab_experiment.go"], + importpath = "px.dev/pixie", + visibility = ["//visibility:private"], +) + +pl_go_binary( + name = "pixie", + embed = [":pixie_lib"], + visibility 
= ["//visibility:public"], +) diff --git a/WORKSPACE b/WORKSPACE index f375888b5fe..0097fee36c5 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -204,7 +204,7 @@ bind( ) # gazelle:repo bazel_gazelle -# Gazelle depes need to be loaded last to make sure they don't override our dependencies. +# Gazelle deps need to be loaded last to make sure they don't override our dependencies. # The first one wins when it comes to package declaration. load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") diff --git a/demos/log-generator/log-generator.yaml b/demos/log-generator/log-generator.yaml new file mode 100644 index 00000000000..ac05a56118b --- /dev/null +++ b/demos/log-generator/log-generator.yaml @@ -0,0 +1,89 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: px-log-generator +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-config + namespace: px-log-generator +data: + vector.toml: | + [sources.demo] + type = "demo_logs" + format = "json" + + [sinks.json_output] + type = "file" + inputs = ["demo"] + path = "/var/log/px-log-generator.json" + encoding.codec = "json" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-logrotate-config + namespace: px-log-generator +data: + logrotate.conf: | + /var/log/px-log-generator.json { + size 30M + copytruncate + rotate 5 + compress + missingok + notifempty + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: vector + namespace: px-log-generator +spec: + selector: + matchLabels: + app: vector + template: + metadata: + labels: + app: vector + spec: + volumes: + - name: log-storage + hostPath: + path: /var/log + type: Directory + - name: logrotate-config + configMap: + name: vector-logrotate-config + - name: config-volume + configMap: + name: vector-config + initContainers: + - name: cleanup + image: busybox + command: ["/bin/sh", "-c", "truncate -s0 /var/log/px-log-generator.json"] + volumeMounts: + - name: log-storage + mountPath: /var/log + containers: + - name: vector + image: 
timberio/vector@sha256:f8933ff1a3ec08df45abc6130947938d98dc85792a25592ec1aa6fe83a7f562c # 0.44.0-debian + args: ["--config", "/etc/vector/vector.toml"] + volumeMounts: + - name: config-volume + mountPath: /etc/vector + - name: log-storage + mountPath: /var/log + - name: logrotate + image: vitess/logrotate@sha256:ba0f99827d0e2d0bda86230ff6666e75383d93babcbc6c803c4d41396214f312 # v21.0.2-bookworm + volumeMounts: + - name: logrotate-config + mountPath: /vt/logrotate.conf + subPath: logrotate.conf + - name: log-storage + mountPath: /var/log + terminationGracePeriodSeconds: 10 + restartPolicy: Always diff --git a/skaffold/skaffold_vizier.yaml b/skaffold/skaffold_vizier.yaml index 2b6218a8c7d..439cda8c41c 100644 --- a/skaffold/skaffold_vizier.yaml +++ b/skaffold/skaffold_vizier.yaml @@ -8,37 +8,43 @@ build: bazel: target: //src/vizier/services/agent/pem:pem_image.tar args: - - --compilation_mode=dbg + - --config=x86_64_sysroot + - --compilation_mode=opt - image: vizier-kelvin_image context: . bazel: target: //src/vizier/services/agent/kelvin:kelvin_image.tar args: - - --compilation_mode=dbg + - --config=x86_64_sysroot + - --compilation_mode=opt - image: vizier-metadata_server_image context: . bazel: target: //src/vizier/services/metadata:metadata_server_image.tar args: - - --compilation_mode=dbg + - --config=x86_64_sysroot + - --compilation_mode=opt - image: vizier-query_broker_server_image context: . bazel: target: //src/vizier/services/query_broker:query_broker_server_image.tar args: - - --compilation_mode=dbg + - --config=x86_64_sysroot + - --compilation_mode=opt - image: vizier-cloud_connector_server_image context: . bazel: target: //src/vizier/services/cloud_connector:cloud_connector_server_image.tar args: - - --compilation_mode=dbg + - --config=x86_64_sysroot + - --compilation_mode=opt - image: vizier-cert_provisioner_image context: . 
bazel: target: //src/utils/cert_provisioner:cert_provisioner_image.tar args: - - --compilation_mode=dbg + - --config=x86_64_sysroot + - --compilation_mode=opt tagPolicy: dateTime: {} local: diff --git a/src/api/go/pxapi/examples/standalone_pem_example/example.go b/src/api/go/pxapi/examples/standalone_pem_example/example.go index 64e1e3b10da..3b3247e11dd 100644 --- a/src/api/go/pxapi/examples/standalone_pem_example/example.go +++ b/src/api/go/pxapi/examples/standalone_pem_example/example.go @@ -30,18 +30,66 @@ import ( // Define PxL script with one table output. var ( + stream = ` +import px +df = px.DataFrame('http_events') +px.display(df.stream()) +` pxl = ` import px +import pxlog +table = 'vector.json' +f = '/home/ddelnano/code/pixie-worktree/vector.json' +pxlog.FileSource(f, table, '5m') +df = px.DataFrame(table) -# Look at the http_events. -df = px.DataFrame(table='http_events') - -# Grab the command line from the metadata. -df.cmdline = px.upid_to_cmdline(df.upid) - -# Limit to the first 10. -df = df.head(10) - +px.display(df)` + bpftrace = ` +import pxtrace +import px +# Adapted from https://github.com/iovisor/bpftrace/blob/master/tools/tcpretrans.bt +program = """ +// tcpretrans.bt Trace or count TCP retransmits +// For Linux, uses bpftrace and eBPF. +// +// Copyright (c) 2018 Dale Hamel. 
+// Licensed under the Apache License, Version 2.0 (the "License") +#include +#include +kprobe:tcp_retransmit_skb +{ + $sk = (struct sock *)arg0; + $inet_family = $sk->__sk_common.skc_family; + $AF_INET = (uint16) 2; + $AF_INET6 = (uint16) 10; + if ($inet_family == $AF_INET || $inet_family == $AF_INET6) { + if ($inet_family == $AF_INET) { + $daddr = ntop($sk->__sk_common.skc_daddr); + $saddr = ntop($sk->__sk_common.skc_rcv_saddr); + } else { + $daddr = ntop($sk->__sk_common.skc_v6_daddr.in6_u.u6_addr8); + $saddr = ntop($sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr8); + } + $sport = $sk->__sk_common.skc_num; + $dport = $sk->__sk_common.skc_dport; + // Destination port is big endian, it must be flipped + $dport = ($dport >> 8) | (($dport << 8) & 0x00FF00); + printf(\"time_:%llu src_ip:%s src_port:%d dst_ip:%s dst_port:%d\", + nsecs, + $saddr, + $sport, + $daddr, + $dport); + } +} +""" +table_name = 'tcp_retransmits_table' +pxtrace.UpsertTracepoint('tcp_retranmits_probe', + table_name, + program, + pxtrace.kprobe(), + "2m") +df = px.DataFrame(table=table_name, select=['time_', 'src_ip', 'src_port', 'dst_ip', 'dst_port']) px.display(df)` ) diff --git a/src/api/go/pxapi/vizier.go b/src/api/go/pxapi/vizier.go index ef5b0bcdfcb..88c5404a583 100644 --- a/src/api/go/pxapi/vizier.go +++ b/src/api/go/pxapi/vizier.go @@ -20,6 +20,7 @@ package pxapi import ( "context" + "strings" "px.dev/pixie/src/api/go/pxapi/errdefs" "px.dev/pixie/src/api/proto/vizierpb" @@ -40,6 +41,7 @@ func (v *VizierClient) ExecuteScript(ctx context.Context, pxl string, mux TableM ClusterID: v.vizierID, QueryStr: pxl, EncryptionOptions: v.encOpts, + Mutation: strings.Contains(pxl, "import pxlog") || strings.Contains(pxl, "import pxtrace"), } origCtx := ctx ctx, cancel := context.WithCancel(ctx) diff --git a/src/carnot/carnot.cc b/src/carnot/carnot.cc index a466bb5194d..d3a0dc25947 100644 --- a/src/carnot/carnot.cc +++ b/src/carnot/carnot.cc @@ -378,9 +378,9 @@ Status CarnotImpl::ExecutePlan(const 
planpb::Plan& logical_plan, const sole::uui int64_t total_time_ns = stats->TotalExecTime(); int64_t self_time_ns = stats->SelfExecTime(); LOG(INFO) << absl::Substitute( - "self_time:$1\ttotal_time: $2\tbytes_output: $3\trows_output: $4\tnode_id:$0", + "self_time:$1\ttotal_time: $2\tbytes_input: $3\tbytes_output: $4\trows_input: $5\trows_output: $6\tnode_id:$0", node_name, PrettyDuration(self_time_ns), PrettyDuration(total_time_ns), - stats->bytes_output, stats->rows_output); + stats->bytes_input, stats->bytes_output, stats->rows_input, stats->rows_output); queryresultspb::OperatorExecutionStats* stats_pb = agent_operator_exec_stats.add_operator_execution_stats(); diff --git a/src/carnot/carnot_test.cc b/src/carnot/carnot_test.cc index 9d32031bfc4..3ea11080844 100644 --- a/src/carnot/carnot_test.cc +++ b/src/carnot/carnot_test.cc @@ -211,7 +211,7 @@ px.display(df, 'range_output'))pxl"; std::vector col0_out1; std::vector col1_out1; std::vector col2_out1; - table_store::Table::Cursor cursor(big_table_.get()); + table_store::Cursor cursor(big_table_.get()); auto batch = cursor.GetNextRowBatch({0}).ConsumeValueOrDie(); for (int64_t i = 0; i < batch->ColumnAt(0)->length(); i++) { if (CarnotTestUtils::big_test_col1[i].val >= 2 && CarnotTestUtils::big_test_col1[i].val < 12) { diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index 228b352501c..1af775011ba 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -226,6 +226,7 @@ pl_cc_test( deps = [ ":cc_library", ":test_utils", + "//src/common/testing/event:cc_library", "//src/carnot/planpb:plan_testutils", "@com_github_apache_arrow//:arrow", ], @@ -296,6 +297,7 @@ pl_cc_test( ":exec_node_test_helpers", ":test_utils", "//src/carnot/planpb:plan_testutils", + "//src/common/testing/event:cc_library", "@com_github_apache_arrow//:arrow", "@com_github_grpc_grpc//:grpc++_test", ], diff --git a/src/carnot/exec/exec_graph_test.cc b/src/carnot/exec/exec_graph_test.cc index 
d5c7abb8d89..d578dbac57c 100644 --- a/src/carnot/exec/exec_graph_test.cc +++ b/src/carnot/exec/exec_graph_test.cc @@ -38,6 +38,7 @@ #include "src/carnot/udf/base.h" #include "src/carnot/udf/registry.h" #include "src/carnot/udf/udf.h" +#include "src/common/testing/event/simulated_time_system.h" #include "src/common/testing/testing.h" #include "src/shared/types/arrow_adapter.h" #include "src/shared/types/types.h" @@ -77,6 +78,12 @@ class BaseExecGraphTest : public ::testing::Test { exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); } std::unique_ptr func_registry_; @@ -150,7 +157,7 @@ TEST_P(ExecGraphExecuteTest, execute) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = Table::Create("test", rel); + auto table = table_store::HotColdTable::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -175,6 +182,12 @@ TEST_P(ExecGraphExecuteTest, execute) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + 
exec_state_->set_metadata_state(metadata_state); EXPECT_OK(exec_state_->AddScalarUDF( 0, "add", std::vector({types::DataType::INT64, types::DataType::FLOAT64}))); @@ -187,11 +200,14 @@ TEST_P(ExecGraphExecuteTest, execute) { /* collect_exec_node_stats */ false, calls_to_generate); EXPECT_OK(e.Execute()); + auto stats = e.GetStats(); + EXPECT_EQ(stats.bytes_processed, 85); + EXPECT_EQ(stats.rows_processed, 5); auto output_table = exec_state_->table_store()->GetTable("output"); std::vector out_in1 = {4.8, 16.4, 26.4}; std::vector out_in2 = {14.8, 12.4}; - table_store::Table::Cursor cursor(output_table); + table_store::Cursor cursor(output_table); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( @@ -229,7 +245,7 @@ TEST_F(ExecGraphTest, execute_time) { table_store::schema::Relation rel( {types::DataType::TIME64NS, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = Table::Create("test", rel); + auto table = table_store::HotColdTable::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {types::Time64NSValue(1), types::Time64NSValue(2), @@ -256,6 +272,12 @@ TEST_F(ExecGraphTest, execute_time) { auto exec_state_ = std::make_unique( func_registry.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); EXPECT_OK(exec_state_->AddScalarUDF( 0, "add", std::vector({types::DataType::INT64, 
types::DataType::FLOAT64}))); @@ -272,7 +294,7 @@ TEST_F(ExecGraphTest, execute_time) { auto output_table = exec_state_->table_store()->GetTable("output"); std::vector out_in1 = {4.8, 16.4, 26.4}; std::vector out_in2 = {14.8, 12.4}; - table_store::Table::Cursor cursor(output_table); + table_store::Cursor cursor(output_table); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( @@ -298,7 +320,7 @@ TEST_F(ExecGraphTest, two_limits_dont_interfere) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = Table::Create("test", rel); + auto table = table_store::HotColdTable::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -323,6 +345,12 @@ TEST_F(ExecGraphTest, two_limits_dont_interfere) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -335,8 +363,8 @@ TEST_F(ExecGraphTest, two_limits_dont_interfere) { std::vector out_col1 = {1, 2}; std::vector out_col2 = {true, false}; std::vector out_col3 = {1.4, 6.2}; - table_store::Table::Cursor cursor1(output_table1); - table_store::Table::Cursor cursor2(output_table2); + table_store::Cursor cursor1(output_table1); + 
table_store::Cursor cursor2(output_table2); auto out_rb1 = cursor1.GetNextRowBatch(std::vector({0, 1, 2})).ConsumeValueOrDie(); auto out_rb2 = cursor2.GetNextRowBatch(std::vector({0, 1, 2})).ConsumeValueOrDie(); @@ -366,7 +394,7 @@ TEST_F(ExecGraphTest, limit_w_multiple_srcs) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = Table::Create("test", rel); + auto table = table_store::HotColdTable::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -391,6 +419,12 @@ TEST_F(ExecGraphTest, limit_w_multiple_srcs) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -402,7 +436,7 @@ TEST_F(ExecGraphTest, limit_w_multiple_srcs) { std::vector out_col1 = {1, 2}; std::vector out_col2 = {true, false}; std::vector out_col3 = {1.4, 6.2}; - table_store::Table::Cursor cursor(output_table); + table_store::Cursor cursor(output_table); auto out_rb = cursor.GetNextRowBatch(std::vector({0, 1, 2})).ConsumeValueOrDie(); EXPECT_TRUE(out_rb->ColumnAt(0)->Equals(types::ToArrow(out_col1, arrow::default_memory_pool()))); EXPECT_TRUE(out_rb->ColumnAt(1)->Equals(types::ToArrow(out_col2, arrow::default_memory_pool()))); @@ -427,7 +461,7 @@ TEST_F(ExecGraphTest, two_sequential_limits) { table_store::schema::Relation rel( {types::DataType::INT64, 
types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = Table::Create("test", rel); + auto table = table_store::HotColdTable::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -453,6 +487,12 @@ TEST_F(ExecGraphTest, two_sequential_limits) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -464,7 +504,7 @@ TEST_F(ExecGraphTest, two_sequential_limits) { std::vector out_col1 = {1, 2}; std::vector out_col2 = {true, false}; std::vector out_col3 = {1.4, 6.2}; - table_store::Table::Cursor cursor(output_table); + table_store::Cursor cursor(output_table); auto out_rb = cursor.GetNextRowBatch({0, 1, 2}).ConsumeValueOrDie(); EXPECT_TRUE(out_rb->ColumnAt(0)->Equals(types::ToArrow(out_col1, arrow::default_memory_pool()))); EXPECT_TRUE(out_rb->ColumnAt(1)->Equals(types::ToArrow(out_col2, arrow::default_memory_pool()))); @@ -490,7 +530,7 @@ TEST_F(ExecGraphTest, execute_with_two_limits) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = Table::Create("test", rel); + auto table = table_store::HotColdTable::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -516,6 +556,12 @@ TEST_F(ExecGraphTest, execute_with_two_limits) { auto exec_state_ = 
std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -526,14 +572,179 @@ TEST_F(ExecGraphTest, execute_with_two_limits) { auto output_table_1 = exec_state_->table_store()->GetTable("output1"); auto output_table_2 = exec_state_->table_store()->GetTable("output2"); std::vector out_in1 = {1.4, 6.2}; - table_store::Table::Cursor cursor1(output_table_1); + table_store::Cursor cursor1(output_table_1); EXPECT_TRUE(cursor1.GetNextRowBatch({2}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); - table_store::Table::Cursor cursor2(output_table_2); + table_store::Cursor cursor2(output_table_2); EXPECT_TRUE(cursor2.GetNextRowBatch({2}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); } +TEST_F(ExecGraphTest, execute_with_timed_sink_node_no_prior_results_table) { + planpb::PlanFragment pf_pb; + ASSERT_TRUE(TextFormat::MergeFromString(planpb::testutils::kPlanWithOTelExport, &pf_pb)); + std::shared_ptr plan_fragment_ = std::make_shared(1); + ASSERT_OK(plan_fragment_->Init(pf_pb)); + + auto plan_state = std::make_unique(func_registry_.get()); + + auto schema = std::make_shared(); + schema->AddRelation( + 1, table_store::schema::Relation( + std::vector( + {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}), + std::vector({"a", "b", "c"}))); + + table_store::schema::Relation rel( + {types::DataType::STRING, 
types::DataType::BOOLEAN, types::DataType::FLOAT64}, + {"col1", "col2", "col3"}); + auto table = table_store::HotColdTable::Create("test", rel); + + auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); + std::vector col1_in1 = {"service a", "service b", "service c"}; + std::vector col2_in1 = {true, false, true}; + std::vector col3_in1 = {1.4, 6.2, 10.2}; + + EXPECT_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + EXPECT_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + EXPECT_OK(rb1.AddColumn(types::ToArrow(col3_in1, arrow::default_memory_pool()))); + EXPECT_OK(table->WriteRowBatch(rb1)); + + auto rb2 = RowBatch(RowDescriptor(rel.col_types()), 2); + std::vector col1_in2 = {"service a", "service b"}; + std::vector col2_in2 = {false, false}; + std::vector col3_in2 = {3.4, 1.2}; + EXPECT_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); + EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); + EXPECT_OK(rb2.AddColumn(types::ToArrow(col3_in2, arrow::default_memory_pool()))); + EXPECT_OK(table->WriteRowBatch(rb2)); + + auto table_store = std::make_shared(); + table_store->AddTable("numbers", table); + auto exec_state_ = std::make_unique( + func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, + MockTraceStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); + + ExecutionGraph e; + auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), + /* collect_exec_node_stats */ false); + + EXPECT_OK(e.Execute()); + + auto output_table_1 = exec_state_->table_store()->GetTable("sink_results"); + 
EXPECT_NE(output_table_1, nullptr); + std::vector out1_in1 = {54}; + std::vector out1_in2 = {54}; + std::vector out1_in3 = {36}; + std::vector out2_in1 = {planpb::OperatorType::MEMORY_SOURCE_OPERATOR}; + std::vector out2_in2 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; + std::vector out2_in3 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; + table_store::Cursor cursor1(output_table_1); + auto rb_out1 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); + EXPECT_TRUE(rb_out1->ColumnAt(0)->Equals(types::ToArrow(out1_in1, arrow::default_memory_pool()))); + EXPECT_TRUE(rb_out1->ColumnAt(1)->Equals(types::ToArrow(out2_in1, arrow::default_memory_pool()))); + auto rb_out2 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); + EXPECT_TRUE(rb_out2->ColumnAt(0)->Equals(types::ToArrow(out1_in2, arrow::default_memory_pool()))); + EXPECT_TRUE(rb_out2->ColumnAt(1)->Equals(types::ToArrow(out2_in2, arrow::default_memory_pool()))); + auto rb_out3 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); + EXPECT_TRUE(rb_out3->ColumnAt(0)->Equals(types::ToArrow(out1_in3, arrow::default_memory_pool()))); + EXPECT_TRUE(rb_out3->ColumnAt(1)->Equals(types::ToArrow(out2_in3, arrow::default_memory_pool()))); +} + +TEST_F(ExecGraphTest, execute_with_timed_sink_node_prior_results_table) { + planpb::PlanFragment pf_pb; + ASSERT_TRUE(TextFormat::MergeFromString(planpb::testutils::kPlanWithOTelExport, &pf_pb)); + std::shared_ptr plan_fragment_ = std::make_shared(1); + ASSERT_OK(plan_fragment_->Init(pf_pb)); + + auto plan_state = std::make_unique(func_registry_.get()); + + auto schema = std::make_shared(); + schema->AddRelation( + 1, table_store::schema::Relation( + std::vector( + {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}), + std::vector({"a", "b", "c"}))); + + table_store::schema::Relation rel( + {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}, + {"col1", "col2", "col3"}); + auto table = 
table_store::HotColdTable::Create("test", rel); + + auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); + std::vector col1_in1 = {"service a", "service b", "service c"}; + std::vector col2_in1 = {true, false, true}; + std::vector col3_in1 = {1.4, 6.2, 10.2}; + + EXPECT_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + EXPECT_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + EXPECT_OK(rb1.AddColumn(types::ToArrow(col3_in1, arrow::default_memory_pool()))); + EXPECT_OK(table->WriteRowBatch(rb1)); + + auto rb2 = RowBatch(RowDescriptor(rel.col_types()), 2); + std::vector col1_in2 = {"service a", "service b"}; + std::vector col2_in2 = {false, false}; + std::vector col3_in2 = {3.4, 1.2}; + EXPECT_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); + EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); + EXPECT_OK(rb2.AddColumn(types::ToArrow(col3_in2, arrow::default_memory_pool()))); + EXPECT_OK(table->WriteRowBatch(rb2)); + + std::vector sink_results_col_names = {"time_", "upid", "bytes_transferred", "destination", + "stream_id"}; + table_store::schema::Relation sink_results_rel( + {types::DataType::TIME64NS, types::DataType::UINT128, types::DataType::INT64, types::DataType::INT64, types::DataType::STRING}, + sink_results_col_names); + auto sink_results_table = table_store::HotColdTable::Create("sink_results", sink_results_rel); + + auto table_store = std::make_shared(); + table_store->AddTable("numbers", table); + table_store->AddTable("sink_results", sink_results_table); + auto exec_state_ = std::make_unique( + func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, + MockTraceStubGenerator, sole::uuid4(), nullptr); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), 
"myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); + + ExecutionGraph e; + auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), + /* collect_exec_node_stats */ false); + + EXPECT_OK(e.Execute()); + + auto output_table_1 = exec_state_->table_store()->GetTable("sink_results"); + EXPECT_NE(output_table_1, nullptr); + std::vector out1_in1 = {54}; + std::vector out1_in2 = {54}; + std::vector out1_in3 = {36}; + std::vector out2_in1 = {planpb::OperatorType::MEMORY_SOURCE_OPERATOR}; + std::vector out2_in2 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; + std::vector out2_in3 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; + table_store::Cursor cursor1(output_table_1); + auto rb_out1 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); + LOG(INFO) << rb_out1->DebugString(); + EXPECT_TRUE(rb_out1->ColumnAt(0)->Equals(types::ToArrow(out1_in1, arrow::default_memory_pool()))); + EXPECT_TRUE(rb_out1->ColumnAt(1)->Equals(types::ToArrow(out2_in1, arrow::default_memory_pool()))); + auto rb_out2 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); + LOG(INFO) << rb_out2->DebugString(); + EXPECT_TRUE(rb_out2->ColumnAt(0)->Equals(types::ToArrow(out1_in2, arrow::default_memory_pool()))); + EXPECT_TRUE(rb_out2->ColumnAt(1)->Equals(types::ToArrow(out2_in2, arrow::default_memory_pool()))); + auto rb_out3 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); + LOG(INFO) << rb_out3->DebugString(); + EXPECT_TRUE(rb_out3->ColumnAt(0)->Equals(types::ToArrow(out1_in3, arrow::default_memory_pool()))); + EXPECT_TRUE(rb_out3->ColumnAt(1)->Equals(types::ToArrow(out2_in3, arrow::default_memory_pool()))); +} + class YieldingExecGraphTest : public BaseExecGraphTest { protected: void SetUp() { SetUpExecState(); } @@ -703,6 +914,12 @@ class GRPCExecGraphTest : public ::testing::Test { exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, 
MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr, grpc_router_.get()); + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + time_system.get()); + exec_state_->set_metadata_state(metadata_state); } void SetUpPlanFragment() { diff --git a/src/carnot/exec/exec_node.h b/src/carnot/exec/exec_node.h index 34c692c61ce..764c865229c 100644 --- a/src/carnot/exec/exec_node.h +++ b/src/carnot/exec/exec_node.h @@ -18,6 +18,7 @@ #pragma once +#include #include #include #include @@ -28,6 +29,18 @@ #include "src/common/perf/perf.h" #include "src/table_store/table_store.h" +namespace px::carnot::exec { +// Forward declaration so enum_range can be specialized. +enum class SinkResultsDestType : uint64_t; + +} // namespace px::carnot::exec + +template <> +struct magic_enum::customize::enum_range { + static constexpr int min = 1000; + static constexpr int max = 11000; +}; + + namespace px { namespace carnot { namespace exec { @@ -127,10 +140,29 @@ struct ExecNodeStats { absl::flat_hash_map extra_info; }; +enum class SinkResultsDestType : uint64_t { + amqp_events = 10001, // TODO(ddelnano): This is set to not collide with the planpb::OperatorType enum + cql_events, + dns_events, + http_events, + kafka_events, // Won't work since table is suffixed with ".beta" + mongodb_events, + mux_events, + mysql_events, + nats_events, // Won't work since table is suffixed with ".beta" + pgsql_events, + redis_events, +}; + /** * This is the base class for the execution nodes in Carnot. 
*/ class ExecNode { + const std::string kContextKey = "mutation_id"; + const std::string kSinkResultsTableName = "sink_results"; + const std::vector sink_results_col_names = {"time_", "upid", "bytes_transferred", + "destination", "stream_id"}; + public: ExecNode() = delete; virtual ~ExecNode() = default; @@ -143,9 +175,27 @@ class ExecNode { * @return */ Status Init(const plan::Operator& plan_node, - const table_store::schema::RowDescriptor& output_descriptor, - std::vector input_descriptors, - bool collect_exec_stats = false) { + const table_store::schema::RowDescriptor& output_descriptor, + std::vector input_descriptors, + bool collect_exec_stats = false) { + auto op_type = plan_node.op_type(); + // TODO(ddelnano): Replace this with a template based compile time check + // to ensure that there can't be segfaults on the subsequent static_casts + if (op_type == planpb::MEMORY_SOURCE_OPERATOR || op_type == planpb::GRPC_SINK_OPERATOR || + op_type == planpb::MEMORY_SINK_OPERATOR || op_type == planpb::OTEL_EXPORT_SINK_OPERATOR) { + const auto* sink_op = static_cast(&plan_node); + context_ = sink_op->context(); + auto op_type = plan_node.op_type(); + destination_ = static_cast(op_type); + if (op_type == planpb::MEMORY_SOURCE_OPERATOR) { + const auto* memory_source_op = static_cast(&plan_node); + auto table_name = memory_source_op->TableName(); + auto protocol_events = magic_enum::enum_cast(table_name); + if (protocol_events.has_value()) { + destination_ = static_cast(protocol_events.value()); + } + } + } is_initialized_ = true; output_descriptor_ = std::make_unique(output_descriptor); input_descriptors_ = input_descriptors; @@ -160,6 +210,9 @@ class ExecNode { */ Status Prepare(ExecState* exec_state) { DCHECK(is_initialized_); + if (context_.find(kContextKey) != context_.end()) { + SetUpStreamResultsTable(exec_state); + } return PrepareImpl(exec_state); } @@ -211,7 +264,7 @@ class ExecNode { * @return The Status of consumption. 
*/ Status ConsumeNext(ExecState* exec_state, const table_store::schema::RowBatch& rb, - size_t parent_index) { + size_t parent_index) { DCHECK(is_initialized_); DCHECK(type() == ExecNodeType::kSinkNode || type() == ExecNodeType::kProcessingNode); if (rb.eos() && !rb.eow()) { @@ -222,6 +275,8 @@ class ExecNode { stats_->ResumeTotalTimer(); PX_RETURN_IF_ERROR(ConsumeNextImpl(exec_state, rb, parent_index)); stats_->StopTotalTimer(); + PX_RETURN_IF_ERROR( + RecordSinkResults(rb, exec_state->time_now(), exec_state->GetAgentUPID().value())); return Status::OK(); } @@ -282,7 +337,8 @@ class ExecNode { * @param rb The row batch to send. * @return Status of children execution. */ - Status SendRowBatchToChildren(ExecState* exec_state, const table_store::schema::RowBatch& rb) { + Status SendRowBatchToChildren(ExecState* exec_state, + const table_store::schema::RowBatch& rb) { stats_->ResumeChildTimer(); for (size_t i = 0; i < children_.size(); ++i) { PX_RETURN_IF_ERROR(children_[i]->ConsumeNext(exec_state, rb, parent_ids_for_children_[i])); @@ -293,10 +349,16 @@ class ExecNode { DCHECK(!sent_eos_); sent_eos_ = true; } + PX_RETURN_IF_ERROR( + RecordSinkResults(rb, exec_state->time_now(), exec_state->GetAgentUPID().value())); return Status::OK(); } - explicit ExecNode(ExecNodeType type) : type_(type) {} + explicit ExecNode(ExecNodeType type) + : type_(type), + rel_({types::DataType::TIME64NS, types::DataType::UINT128, types::DataType::INT64, + types::DataType::INT64, types::DataType::STRING}, + sink_results_col_names) {} // Defines the protected implementations of the non-virtual interface functions // defined above. 
@@ -321,6 +383,43 @@ class ExecNode { bool sent_eos_ = false; private: + void SetUpStreamResultsTable(ExecState* exec_state) { + auto sink_results = exec_state->table_store()->GetTable(kSinkResultsTableName); + if (sink_results != nullptr) { + table_ = sink_results; + } else { + auto table = table_store::HotColdTable::Create(kSinkResultsTableName, rel_); + exec_state->table_store()->AddTable(kSinkResultsTableName, table); + table_ = table.get(); + } + } + + Status RecordSinkResults(const table_store::schema::RowBatch& rb, + const types::Time64NSValue time_now, const types::UInt128Value upid) { + if (table_ != nullptr && context_.find(kContextKey) != context_.end()) { + auto mutation_id = context_[kContextKey]; + std::vector col1_in1 = {time_now}; + std::vector col2_in1 = {upid}; + std::vector col3_in1 = {rb.NumBytes()}; + std::vector col4_in1 = {destination_}; + std::vector col5_in1 = {mutation_id}; + auto rb_sink_stats = + table_store::schema::RowBatch(table_store::schema::RowDescriptor(rel_.col_types()), 1); + PX_RETURN_IF_ERROR( + rb_sink_stats.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + PX_RETURN_IF_ERROR( + rb_sink_stats.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + PX_RETURN_IF_ERROR( + rb_sink_stats.AddColumn(types::ToArrow(col3_in1, arrow::default_memory_pool()))); + PX_RETURN_IF_ERROR( + rb_sink_stats.AddColumn(types::ToArrow(col4_in1, arrow::default_memory_pool()))); + PX_RETURN_IF_ERROR( + rb_sink_stats.AddColumn(types::ToArrow(col5_in1, arrow::default_memory_pool()))); + PX_RETURN_IF_ERROR(table_->WriteRowBatch(rb_sink_stats)); + } + return Status::OK(); + } + // The stats of this exec node. std::unique_ptr stats_; // Unowned reference to the children. Must remain valid for the duration of query. @@ -334,6 +433,16 @@ class ExecNode { ExecNodeType type_; // Whether this node has been initialized. bool is_initialized_ = false; + + // The context key, value pairs passed to the operator node. 
+ // This is currently used to store the mutation_id. + std::map context_; + + // The operator type of the current node + uint64_t destination_; + + table_store::Table* table_; + table_store::schema::Relation rel_; }; /** diff --git a/src/carnot/exec/exec_state.h b/src/carnot/exec/exec_state.h index 444d9298d06..2ecb5713918 100644 --- a/src/carnot/exec/exec_state.h +++ b/src/carnot/exec/exec_state.h @@ -73,8 +73,9 @@ class ExecState { udf::Registry* func_registry, std::shared_ptr table_store, const ResultSinkStubGenerator& stub_generator, const MetricsStubGenerator& metrics_stub_generator, - const TraceStubGenerator& trace_stub_generator, const LogsStubGenerator& logs_stub_generator, - const sole::uuid& query_id, udf::ModelPool* model_pool, GRPCRouter* grpc_router = nullptr, + const TraceStubGenerator& trace_stub_generator, + const LogsStubGenerator& logs_stub_generator, const sole::uuid& query_id, + udf::ModelPool* model_pool, GRPCRouter* grpc_router = nullptr, std::function add_auth_func = [](grpc::ClientContext*) {}, ExecMetrics* exec_metrics = nullptr) : func_registry_(func_registry), @@ -87,7 +88,8 @@ class ExecState { model_pool_(model_pool), grpc_router_(grpc_router), add_auth_to_grpc_client_context_func_(add_auth_func), - exec_metrics_(exec_metrics) {} + exec_metrics_(exec_metrics), + time_now_(px::CurrentTimeNS()) {} ~ExecState() { if (grpc_router_ != nullptr) { @@ -211,6 +213,8 @@ class ExecState { metadata_state_ = metadata_state; } + md::UPID GetAgentUPID() const { return metadata_state_->agent_upid(); } + GRPCRouter* grpc_router() { return grpc_router_; } void AddAuthToGRPCClientContext(grpc::ClientContext* ctx) { @@ -220,6 +224,8 @@ class ExecState { ExecMetrics* exec_metrics() { return exec_metrics_; } + types::Time64NSValue time_now() const { return time_now_; } + private: udf::Registry* func_registry_; std::shared_ptr table_store_; diff --git a/src/carnot/exec/grpc_sink_node_benchmark.cc b/src/carnot/exec/grpc_sink_node_benchmark.cc index 
96707f0d896..77447969f47 100644 --- a/src/carnot/exec/grpc_sink_node_benchmark.cc +++ b/src/carnot/exec/grpc_sink_node_benchmark.cc @@ -76,7 +76,8 @@ void BM_GRPCSinkNodeSplitting(benchmark::State& state) { px::carnot::exec::GRPCSinkNode node; auto op_proto = px::carnot::planpb::testutils::CreateTestGRPCSink2PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); auto num_rows = 1024; diff --git a/src/carnot/exec/grpc_sink_node_test.cc b/src/carnot/exec/grpc_sink_node_test.cc index 62d6a2e2c12..f4b398ecae4 100644 --- a/src/carnot/exec/grpc_sink_node_test.cc +++ b/src/carnot/exec/grpc_sink_node_test.cc @@ -18,6 +18,7 @@ #include "src/carnot/exec/grpc_sink_node.h" +#include #include #include @@ -162,7 +163,8 @@ query_result { TEST_F(GRPCSinkNodeTest, internal_result) { auto op_proto = planpb::testutils::CreateTestGRPCSink1PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -294,7 +296,8 @@ query_result { TEST_F(GRPCSinkNodeTest, external_result) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -352,7 +355,8 @@ TEST_F(GRPCSinkNodeTest, external_result) { TEST_F(GRPCSinkNodeTest, check_connection) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); 
RowDescriptor output_rd({types::DataType::INT64}); @@ -392,7 +396,8 @@ TEST_F(GRPCSinkNodeTest, check_connection) { TEST_F(GRPCSinkNodeTest, update_connection_time) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -444,7 +449,8 @@ class GRPCSinkNodeSplitTest : public GRPCSinkNodeTest, TEST_P(GRPCSinkNodeSplitTest, break_up_batches) { auto op_proto = planpb::testutils::CreateTestGRPCSink1PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); auto test_case = GetParam(); @@ -652,7 +658,8 @@ INSTANTIATE_TEST_SUITE_P(SplitBatchesTest, GRPCSinkNodeSplitTest, TEST_F(GRPCSinkNodeTest, retry_failed_writes) { auto op_proto = planpb::testutils::CreateTestGRPCSink1PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -724,7 +731,8 @@ TEST_F(GRPCSinkNodeTest, retry_failed_writes) { TEST_F(GRPCSinkNodeTest, check_connection_after_eos) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); diff --git a/src/carnot/exec/memory_sink_node.cc b/src/carnot/exec/memory_sink_node.cc index 6f0fb54c5e9..910b70c3f30 100644 --- a/src/carnot/exec/memory_sink_node.cc +++ b/src/carnot/exec/memory_sink_node.cc @@ -62,7 +62,8 @@ Status 
MemorySinkNode::PrepareImpl(ExecState* exec_state_) { col_names.push_back(plan_node_->ColumnName(i)); } - table_ = Table::Create(TableName(), Relation(input_descriptor_->types(), col_names)); + table_ = table_store::HotColdTable::Create(TableName(), + Relation(input_descriptor_->types(), col_names)); exec_state_->table_store()->AddTable(plan_node_->TableName(), table_); return Status::OK(); diff --git a/src/carnot/exec/memory_sink_node_test.cc b/src/carnot/exec/memory_sink_node_test.cc index e2587dbf132..a318375b249 100644 --- a/src/carnot/exec/memory_sink_node_test.cc +++ b/src/carnot/exec/memory_sink_node_test.cc @@ -85,7 +85,7 @@ TEST_F(MemorySinkNodeTest, basic) { false, 0); auto table = exec_state_->table_store()->GetTable("cpu_15s"); - table_store::Table::Cursor cursor(table); + table_store::Cursor cursor(table); auto batch_or_s = cursor.GetNextRowBatch({0, 1}); EXPECT_OK(batch_or_s); auto batch = batch_or_s.ConsumeValueOrDie(); @@ -104,7 +104,7 @@ TEST_F(MemorySinkNodeTest, basic) { .Close(); // Update stop spec of the cursor to include the new row batch. 
- cursor.UpdateStopSpec(table_store::Table::Cursor::StopSpec{}); + cursor.UpdateStopSpec(table_store::Cursor::StopSpec{}); batch_or_s = cursor.GetNextRowBatch({0, 1}); EXPECT_OK(batch_or_s); batch = batch_or_s.ConsumeValueOrDie(); @@ -147,7 +147,7 @@ TEST_F(MemorySinkNodeTest, zero_row_row_batch_not_eos) { .Close(); auto table = exec_state_->table_store()->GetTable("cpu_15s"); - table_store::Table::Cursor cursor(table); + table_store::Cursor cursor(table); auto batch_or_s = cursor.GetNextRowBatch({0, 1}); EXPECT_OK(batch_or_s); auto batch = batch_or_s.ConsumeValueOrDie(); diff --git a/src/carnot/exec/memory_source_node.cc b/src/carnot/exec/memory_source_node.cc index 97ad0513b50..2c9f02df14e 100644 --- a/src/carnot/exec/memory_source_node.cc +++ b/src/carnot/exec/memory_source_node.cc @@ -32,8 +32,8 @@ namespace px { namespace carnot { namespace exec { -using StartSpec = Table::Cursor::StartSpec; -using StopSpec = Table::Cursor::StopSpec; +using StartSpec = table_store::Cursor::StartSpec; +using StopSpec = table_store::Cursor::StopSpec; std::string MemorySourceNode::DebugStringImpl() { return absl::Substitute("Exec::MemorySourceNode: ", plan_node_->TableName(), @@ -85,7 +85,7 @@ Status MemorySourceNode::OpenImpl(ExecState* exec_state) { stop_spec.type = StopSpec::StopType::CurrentEndOfTable; } } - cursor_ = std::make_unique(table_, start_spec, stop_spec); + cursor_ = std::make_unique(table_, start_spec, stop_spec); return Status::OK(); } diff --git a/src/carnot/exec/memory_source_node.h b/src/carnot/exec/memory_source_node.h index ccb059827f3..edbea4375d5 100644 --- a/src/carnot/exec/memory_source_node.h +++ b/src/carnot/exec/memory_source_node.h @@ -60,7 +60,7 @@ class MemorySourceNode : public SourceNode { // Whether this memory source will stream future results. 
bool streaming_ = false; - std::unique_ptr cursor_; + std::unique_ptr cursor_; std::unique_ptr plan_node_; table_store::Table* table_ = nullptr; diff --git a/src/carnot/exec/memory_source_node_test.cc b/src/carnot/exec/memory_source_node_test.cc index df86c58c23c..418de849ee5 100644 --- a/src/carnot/exec/memory_source_node_test.cc +++ b/src/carnot/exec/memory_source_node_test.cc @@ -59,7 +59,8 @@ class MemorySourceNodeTest : public ::testing::Test { {"col1", "time_"}); int64_t compaction_size = 2 * sizeof(bool) + 2 * sizeof(int64_t); - cpu_table_ = std::make_shared("cpu", rel, 128 * 1024, compaction_size); + cpu_table_ = + std::make_shared("cpu", rel, 128 * 1024, compaction_size); exec_state_->table_store()->AddTable("cpu", cpu_table_); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); @@ -76,7 +77,7 @@ class MemorySourceNodeTest : public ::testing::Test { EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); EXPECT_OK(cpu_table_->WriteRowBatch(rb2)); - exec_state_->table_store()->AddTable("empty", Table::Create("empty", rel)); + exec_state_->table_store()->AddTable("empty", table_store::HotColdTable::Create("empty", rel)); } std::shared_ptr
cpu_table_; @@ -237,7 +238,7 @@ class MemorySourceNodeTabletTest : public ::testing::Test { rel = table_store::schema::Relation({types::DataType::BOOLEAN, types::DataType::TIME64NS}, {"col1", "time_"}); - std::shared_ptr
tablet = Table::Create(table_name_, rel); + std::shared_ptr
tablet = table_store::HotColdTable::Create(table_name_, rel); AddValuesToTable(tablet.get()); exec_state_->table_store()->AddTable(tablet, table_name_, table_id_, tablet_id_); @@ -296,7 +297,7 @@ TEST_F(MemorySourceNodeTabletTest, basic_tablet_test) { TEST_F(MemorySourceNodeTabletTest, multiple_tablet_test) { types::TabletID new_tablet_id = "456"; EXPECT_NE(tablet_id_, new_tablet_id); - std::shared_ptr
new_tablet = Table::Create(tablet_id_, rel); + std::shared_ptr
new_tablet = table_store::HotColdTable::Create(tablet_id_, rel); auto wrapper_batch_1 = std::make_unique(); auto col_wrapper_1 = std::make_shared(0); @@ -458,7 +459,8 @@ class ParamMemorySourceNodeTest : public ::testing::Test, std::vector{types::DataType::TIME64NS}, std::vector{"time_"}); int64_t compaction_size = 2 * sizeof(int64_t); - cpu_table_ = std::make_shared
("cpu", *rel_, 128 * 1024, compaction_size); + cpu_table_ = + std::make_shared("cpu", *rel_, 128 * 1024, compaction_size); exec_state_->table_store()->AddTable("cpu", cpu_table_); planpb::Operator op; diff --git a/src/carnot/exec/otel_export_sink_node.cc b/src/carnot/exec/otel_export_sink_node.cc index 77da9f12d0b..3ba8ec2a297 100644 --- a/src/carnot/exec/otel_export_sink_node.cc +++ b/src/carnot/exec/otel_export_sink_node.cc @@ -465,7 +465,8 @@ Status OTelExportSinkNode::ConsumeLogs(ExecState* exec_state, const RowBatch& rb AddAttributes(log->mutable_attributes(), log_pb.attributes(), rb, row_idx); auto time_col = rb.ColumnAt(log_pb.time_column_index()).get(); - log->set_time_unix_nano(types::GetValueFromArrowArray(time_col, row_idx)); + log->set_time_unix_nano( + types::GetValueFromArrowArray(time_col, row_idx)); if (log_pb.observed_time_column_index() >= 0) { auto observed_time_col = rb.ColumnAt(log_pb.observed_time_column_index()).get(); log->set_observed_time_unix_nano( diff --git a/src/carnot/exec/otel_export_sink_node_test.cc b/src/carnot/exec/otel_export_sink_node_test.cc index 9aeee55103e..37ebfe3ea5b 100644 --- a/src/carnot/exec/otel_export_sink_node_test.cc +++ b/src/carnot/exec/otel_export_sink_node_test.cc @@ -42,6 +42,7 @@ #include "src/carnot/planpb/plan.pb.h" #include "src/carnot/planpb/test_proto.h" #include "src/carnot/udf/registry.h" +#include "src/common/testing/event/simulated_time_system.h" #include "src/common/testing/testing.h" #include "src/common/uuid/uuid_utils.h" #include "src/shared/types/types.h" @@ -101,6 +102,14 @@ class OTelExportSinkNodeTest : public ::testing::Test { return std::move(logs_mock_unique_); }, sole::uuid4(), nullptr, nullptr, [](grpc::ClientContext*) {}); + + auto time_system = std::make_unique( + std::chrono::steady_clock::now(), std::chrono::system_clock::now()); + + auto metadata_state = std::make_shared( + "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", + 
time_system.get()); + exec_state_->set_metadata_state(metadata_state); } protected: @@ -136,7 +145,8 @@ metrics { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_pb_txt, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); RowDescriptor input_rd({types::TIME64NS, types::FLOAT64}); RowDescriptor output_rd({}); @@ -185,7 +195,8 @@ metrics { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_pb_txt, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); RowDescriptor input_rd({types::TIME64NS, types::FLOAT64, types::STRING}); RowDescriptor output_rd({}); @@ -250,7 +261,8 @@ TEST_P(OTelMetricsTest, process_data) { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. @@ -1033,7 +1045,8 @@ TEST_P(OTelSpanTest, process_data) { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. 
@@ -1530,7 +1543,8 @@ TEST_P(SpanIDTests, generate_ids) { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. @@ -1686,7 +1700,8 @@ spans { parent_span_id_column_index: -1 })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 20 } } @@ -1724,7 +1739,8 @@ metrics { gauge { int_column_index: 1 } })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 11 } } @@ -1774,7 +1790,8 @@ spans { parent_span_id_column_index: -1 })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 20 } } @@ -1825,7 +1842,8 @@ metrics { gauge { int_column_index: 1 } })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 11 } } @@ -1871,7 +1889,8 @@ TEST_P(OTelLogTest, process_data) { 
planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - auto plan_node = std::make_unique(1); + std::map context; + auto plan_node = std::make_unique(1, context); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. diff --git a/src/carnot/exec/test_utils.h b/src/carnot/exec/test_utils.h index e2f6fae1289..e0958eaf8df 100644 --- a/src/carnot/exec/test_utils.h +++ b/src/carnot/exec/test_utils.h @@ -122,7 +122,7 @@ class CarnotTestUtils { static std::shared_ptr TestTable() { table_store::schema::Relation rel({types::DataType::FLOAT64, types::DataType::INT64}, {"col1", "col2"}); - auto table = table_store::Table::Create("test_table", rel); + auto table = table_store::HotColdTable::Create("test_table", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {0.5, 1.2, 5.3}; @@ -143,7 +143,7 @@ class CarnotTestUtils { static std::shared_ptr TestDuration64Table() { table_store::schema::Relation rel({types::DataType::INT64}, {"col1"}); - auto table = table_store::Table::Create("test_table", rel); + auto table = table_store::HotColdTable::Create("test_table", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -166,7 +166,7 @@ class CarnotTestUtils { types::DataType::INT64, types::DataType::STRING}, {"time_", "col2", "col3", "num_groups", "string_groups"}); - auto table = table_store::Table::Create("test_table", rel); + auto table = table_store::HotColdTable::Create("test_table", rel); for (const auto& pair : split_idx) { auto rb = RowBatch(RowDescriptor(rel.col_types()), pair.second - pair.first); @@ -227,7 +227,7 @@ class CarnotTestUtils { "read_bytes", "write_bytes", }); - auto table = table_store::Table::Create("process_table", rel); + auto table = table_store::HotColdTable::Create("process_table", rel); return table; } @@ -248,7 +248,7 @@ class CarnotTestUtils { 
"req_path", "req_body", "req_body_size", "resp_headers", "resp_status", "resp_message", "resp_body", "resp_body_size", "latency", }); - auto table = table_store::Table::Create("http_events_table", rel); + auto table = table_store::HotColdTable::Create("http_events_table", rel); return table; } }; diff --git a/src/carnot/funcs/builtins/builtins.cc b/src/carnot/funcs/builtins/builtins.cc index f871244bdaf..5f4b941c9b9 100644 --- a/src/carnot/funcs/builtins/builtins.cc +++ b/src/carnot/funcs/builtins/builtins.cc @@ -25,6 +25,7 @@ #include "src/carnot/funcs/builtins/ml_ops.h" #include "src/carnot/funcs/builtins/pii_ops.h" #include "src/carnot/funcs/builtins/pprof_ops.h" +#include "src/carnot/funcs/builtins/pipeline_ops.h" #include "src/carnot/funcs/builtins/regex_ops.h" #include "src/carnot/funcs/builtins/request_path_ops.h" #include "src/carnot/funcs/builtins/sql_ops.h" @@ -52,6 +53,7 @@ void RegisterBuiltinsOrDie(udf::Registry* registry) { RegisterPIIOpsOrDie(registry); RegisterURIOpsOrDie(registry); RegisterUtilOpsOrDie(registry); + RegisterPipelineOpsOrDie(registry); RegisterPProfOpsOrDie(registry); } diff --git a/src/carnot/funcs/builtins/pipeline_ops.cc b/src/carnot/funcs/builtins/pipeline_ops.cc new file mode 100644 index 00000000000..8528ad6dd29 --- /dev/null +++ b/src/carnot/funcs/builtins/pipeline_ops.cc @@ -0,0 +1,39 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ +#include +#include +#include + +#include +#include "src/carnot/funcs/builtins/pipeline_ops.h" + +namespace px { +namespace carnot { +namespace builtins { + +void RegisterPipelineOpsOrDie(udf::Registry* registry) { + CHECK(registry != nullptr); + /***************************************** + * Scalar UDFs. + *****************************************/ + registry->RegisterOrDie("pipeline_dest_to_name"); +} + +} // namespace builtins +} // namespace carnot +} // namespace px diff --git a/src/carnot/funcs/builtins/pipeline_ops.h b/src/carnot/funcs/builtins/pipeline_ops.h new file mode 100644 index 00000000000..eb479d4a083 --- /dev/null +++ b/src/carnot/funcs/builtins/pipeline_ops.h @@ -0,0 +1,83 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ +#pragma once + +#include "src/carnot/udf/registry.h" +#include "src/common/base/utils.h" +#include "src/shared/types/types.h" + +namespace px::carnot::builtins { +// Forward declaration so enum_range can be specialized. 
+enum class SinkResultsDestType : uint64_t; + +} // namespace px::carnot::builtins + +template <> +struct magic_enum::customize::enum_range { + static constexpr int min = 1000; + static constexpr int max = 11000; +}; + + +namespace px { +namespace carnot { +namespace builtins { + +enum class SinkResultsDestType : uint64_t { + grpc_sink = 9100, + otel_export = 9200, + amqp_events = 10001, // TODO(ddelnano): This is set to not collide with the planpb::OperatorType enum + cql_events, + dns_events, + http_events, + kafka_events, // Won't work since table is suffixed with ".beta" + mongodb_events, + mux_events, + mysql_events, + nats_events, // Won't work since table is suffixed with ".beta" + pgsql_events, + redis_events, +}; + +class PipelineDestToName : public udf::ScalarUDF { + public: + StringValue Exec(FunctionContext*, Int64Value input) { + auto protocol_events = magic_enum::enum_cast(input.val); + if (!protocol_events.has_value()) { + return "unknown"; + } + return std::string(magic_enum::enum_name(protocol_events.value())); + } + + static udf::ScalarUDFDocBuilder Doc() { + return udf::ScalarUDFDocBuilder( + "Convert the destination ID from the sink_results table to a human-readable name.") + .Details("TBD") + .Example(R"doc( +df = px.DataFrame("sink_results") +df.dest = px.pipeline_dest_to_name(df.destination))doc") + .Arg("dest", "The destination enum to convert.") + .Returns("The human-readable name of the destination."); + } +}; + +void RegisterPipelineOpsOrDie(udf::Registry* registry); + +} // namespace builtins +} // namespace carnot +} // namespace px diff --git a/src/carnot/plan/operators.cc b/src/carnot/plan/operators.cc index bfdb43427f4..42f19f98033 100644 --- a/src/carnot/plan/operators.cc +++ b/src/carnot/plan/operators.cc @@ -45,6 +45,20 @@ namespace plan { using px::Status; +// enable_if std::is_base_of_v +template >> +std::unique_ptr CreateOperator(int64_t id, const TProto& pb, + std::map context) { + auto op = std::make_unique(id, context); + auto 
s = op->Init(pb); + // On init failure, return null; + if (!s.ok()) { + LOG(ERROR) << "Failed to initialize operator with err: " << s.msg(); + return nullptr; + } + return op; +} + template std::unique_ptr CreateOperator(int64_t id, const TProto& pb) { auto op = std::make_unique(id); @@ -58,19 +72,21 @@ std::unique_ptr CreateOperator(int64_t id, const TProto& pb) { } std::unique_ptr Operator::FromProto(const planpb::Operator& pb, int64_t id) { + auto pb_context = pb.context(); + std::map context(pb_context.begin(), pb_context.end()); switch (pb.op_type()) { case planpb::MEMORY_SOURCE_OPERATOR: - return CreateOperator(id, pb.mem_source_op()); + return CreateOperator(id, pb.mem_source_op(), context); case planpb::MAP_OPERATOR: return CreateOperator(id, pb.map_op()); case planpb::AGGREGATE_OPERATOR: return CreateOperator(id, pb.agg_op()); case planpb::MEMORY_SINK_OPERATOR: - return CreateOperator(id, pb.mem_sink_op()); + return CreateOperator(id, pb.mem_sink_op(), context); case planpb::GRPC_SOURCE_OPERATOR: return CreateOperator(id, pb.grpc_source_op()); case planpb::GRPC_SINK_OPERATOR: - return CreateOperator(id, pb.grpc_sink_op()); + return CreateOperator(id, pb.grpc_sink_op(), context); case planpb::FILTER_OPERATOR: return CreateOperator(id, pb.filter_op()); case planpb::LIMIT_OPERATOR: @@ -84,7 +100,7 @@ std::unique_ptr Operator::FromProto(const planpb::Operator& pb, int64_ case planpb::EMPTY_SOURCE_OPERATOR: return CreateOperator(id, pb.empty_source_op()); case planpb::OTEL_EXPORT_SINK_OPERATOR: - return CreateOperator(id, pb.otel_sink_op()); + return CreateOperator(id, pb.otel_sink_op(), context); default: LOG(FATAL) << absl::Substitute("Unknown operator type: $0", magic_enum::enum_name(pb.op_type())); diff --git a/src/carnot/plan/operators.h b/src/carnot/plan/operators.h index 8586f6eb976..9a12712e264 100644 --- a/src/carnot/plan/operators.h +++ b/src/carnot/plan/operators.h @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -80,9 
+81,30 @@ class Operator : public PlanNode { bool is_initialized_ = false; }; -class MemorySourceOperator : public Operator { +class SinkOperator : public Operator { public: - explicit MemorySourceOperator(int64_t id) : Operator(id, planpb::MEMORY_SOURCE_OPERATOR) {} + explicit SinkOperator(int64_t id, planpb::OperatorType op_type, + std::map context) + : Operator(id, op_type), context_(context) {} + + std::string DebugString() const override { return absl::StrCat("SinkOperator: ", id_); } + + StatusOr OutputRelation( + const table_store::schema::Schema& /*schema*/, const PlanState& /*state*/, + const std::vector& /*input_ids*/) const override { + return error::Unimplemented("Derived sink operator must implement OutputRelation"); + } + + std::map context() const { return context_; } + + protected: + std::map context_; +}; + +class MemorySourceOperator : public SinkOperator { + public: + explicit MemorySourceOperator(int64_t id, std::map context) + : SinkOperator(id, planpb::MEMORY_SOURCE_OPERATOR, context) {} ~MemorySourceOperator() override = default; StatusOr OutputRelation( const table_store::schema::Schema& schema, const PlanState& state, @@ -153,9 +175,10 @@ class AggregateOperator : public Operator { planpb::AggregateOperator pb_; }; -class MemorySinkOperator : public Operator { +class MemorySinkOperator : public SinkOperator { public: - explicit MemorySinkOperator(int64_t id) : Operator(id, planpb::MEMORY_SINK_OPERATOR) {} + explicit MemorySinkOperator(int64_t id, std::map context) + : SinkOperator(id, planpb::MEMORY_SINK_OPERATOR, context) {} ~MemorySinkOperator() override = default; StatusOr OutputRelation( @@ -185,9 +208,10 @@ class GRPCSourceOperator : public Operator { planpb::GRPCSourceOperator pb_; }; -class GRPCSinkOperator : public Operator { +class GRPCSinkOperator : public SinkOperator { public: - explicit GRPCSinkOperator(int64_t id) : Operator(id, planpb::GRPC_SINK_OPERATOR) {} + explicit GRPCSinkOperator(int64_t id, std::map context) + : 
SinkOperator(id, planpb::GRPC_SINK_OPERATOR, context) {} ~GRPCSinkOperator() override = default; StatusOr OutputRelation( @@ -359,9 +383,10 @@ class EmptySourceOperator : public Operator { std::vector column_idxs_; }; -class OTelExportSinkOperator : public Operator { +class OTelExportSinkOperator : public SinkOperator { public: - explicit OTelExportSinkOperator(int64_t id) : Operator(id, planpb::OTEL_EXPORT_SINK_OPERATOR) {} + explicit OTelExportSinkOperator(int64_t id, std::map context) + : SinkOperator(id, planpb::OTEL_EXPORT_SINK_OPERATOR, context) {} ~OTelExportSinkOperator() override = default; StatusOr OutputRelation( diff --git a/src/carnot/planner/cgo_export.cc b/src/carnot/planner/cgo_export.cc index cc80e3cc438..211292d251f 100644 --- a/src/carnot/planner/cgo_export.cc +++ b/src/carnot/planner/cgo_export.cc @@ -126,21 +126,21 @@ char* PlannerCompileMutations(PlannerPtr planner_ptr, const char* mutation_reque auto planner = reinterpret_cast(planner_ptr); - auto dynamic_trace_or_s = planner->CompileTrace(mutation_request_pb); - if (!dynamic_trace_or_s.ok()) { - return ExitEarly(dynamic_trace_or_s.status(), resultLen); + auto mutations_ir_or_s = planner->CompileTrace(mutation_request_pb); + if (!mutations_ir_or_s.ok()) { + return ExitEarly(mutations_ir_or_s.status(), resultLen); } - std::unique_ptr trace = - dynamic_trace_or_s.ConsumeValueOrDie(); + std::unique_ptr mutations = + mutations_ir_or_s.ConsumeValueOrDie(); // If the response is ok, then we can go ahead and set this up. CompileMutationsResponse mutations_response_pb; - WrapStatus(&mutations_response_pb, dynamic_trace_or_s.status()); + WrapStatus(&mutations_response_pb, mutations_ir_or_s.status()); PLANNER_RETURN_IF_ERROR(CompileMutationsResponse, resultLen, - trace->ToProto(&mutations_response_pb)); + mutations->ToProto(&mutations_response_pb)); - // Serialize the tracing program into bytes. + // Serialize the mutations into bytes. 
return PrepareResult(&mutations_response_pb, resultLen); } diff --git a/src/carnot/planner/cgo_export_test.cc b/src/carnot/planner/cgo_export_test.cc index eed16a9a972..9abace0d86a 100644 --- a/src/carnot/planner/cgo_export_test.cc +++ b/src/carnot/planner/cgo_export_test.cc @@ -278,6 +278,43 @@ TEST_F(PlannerExportTest, compile_delete_tracepoint) { EXPECT_THAT(mutations_response_pb, EqualsProto(kExpectedDeleteTracepointsMutationPb)); } +constexpr char kSingleFileSource[] = R"pxl( +import pxlog + +glob_pattern = 'test.json' +pxlog.FileSource(glob_pattern, 'test_table', '5m') +)pxl"; + +constexpr char kSingleFileSourceProgramPb[] = R"pxl( +glob_pattern: "test.json" +table_name: "test_table" +ttl { + seconds: 300 +} +)pxl"; + +TEST_F(PlannerExportTest, compile_file_source_def) { + planner_ = MakePlanner(); + int result_len; + std::string mutation_request; + plannerpb::CompileMutationsRequest req; + req.set_query_str(kSingleFileSource); + *(req.mutable_logical_planner_state()) = testutils::CreateTwoPEMsOneKelvinPlannerState(); + ASSERT_TRUE(req.SerializeToString(&mutation_request)); + auto interface_result = PlannerCompileMutations(planner_, mutation_request.c_str(), + mutation_request.length(), &result_len); + + ASSERT_GT(result_len, 0); + plannerpb::CompileMutationsResponse mutations_response_pb; + ASSERT_TRUE(mutations_response_pb.ParseFromString( + std::string(interface_result, interface_result + result_len))); + delete[] interface_result; + ASSERT_OK(mutations_response_pb.status()); + ASSERT_EQ(mutations_response_pb.mutations().size(), 1); + EXPECT_THAT(mutations_response_pb.mutations()[0].file_source(), + EqualsProto(kSingleFileSourceProgramPb)); +} + constexpr char kExportPxL[] = R"pxl(import px otel_df = 'placeholder' df = px.DataFrame('http_events', start_time='-5m') diff --git a/src/carnot/planner/compiler/BUILD.bazel b/src/carnot/planner/compiler/BUILD.bazel index 1298c0775a9..359d3518227 100644 --- a/src/carnot/planner/compiler/BUILD.bazel +++ 
b/src/carnot/planner/compiler/BUILD.bazel @@ -40,6 +40,7 @@ pl_cc_library( "//src/carnot/planner/compiler/optimizer:cc_library", "//src/carnot/planner/compiler_error_context:cc_library", "//src/carnot/planner/compiler_state:cc_library", + "//src/carnot/planner/file_source:cc_library", "//src/carnot/planner/ir:cc_library", "//src/carnot/planner/metadata:cc_library", "//src/carnot/planner/objects:cc_library", diff --git a/src/carnot/planner/compiler/ast_visitor.cc b/src/carnot/planner/compiler/ast_visitor.cc index 0047815780c..a4bfe1eb071 100644 --- a/src/carnot/planner/compiler/ast_visitor.cc +++ b/src/carnot/planner/compiler/ast_visitor.cc @@ -104,6 +104,8 @@ Status ASTVisitorImpl::SetupModules( PixieModule::Create(ir_graph_, compiler_state_, this, func_based_exec_, reserved_names_)); PX_ASSIGN_OR_RETURN((*module_handler_)[TraceModule::kTraceModuleObjName], TraceModule::Create(mutations_, this)); + PX_ASSIGN_OR_RETURN((*module_handler_)[LogModule::kLogModuleObjName], + LogModule::Create(mutations_, this)); PX_ASSIGN_OR_RETURN((*module_handler_)[ConfigModule::kConfigModuleObjName], ConfigModule::Create(mutations_, this)); for (const auto& [module_name, module_text] : module_name_to_pxl_map) { diff --git a/src/carnot/planner/compiler/ast_visitor.h b/src/carnot/planner/compiler/ast_visitor.h index 7d10e93a6ae..7984698ecb5 100644 --- a/src/carnot/planner/compiler/ast_visitor.h +++ b/src/carnot/planner/compiler/ast_visitor.h @@ -33,6 +33,7 @@ #include "src/carnot/funcs/builtins/math_ops.h" #include "src/carnot/planner/ast/ast_visitor.h" #include "src/carnot/planner/compiler_state/compiler_state.h" +#include "src/carnot/planner/file_source/log_module.h" #include "src/carnot/planner/ir/ast_utils.h" #include "src/carnot/planner/ir/ir.h" #include "src/carnot/planner/objects/dataframe.h" diff --git a/src/carnot/planner/compiler/graph_comparison.h b/src/carnot/planner/compiler/graph_comparison.h index c6f75b92037..5e0f8a5641c 100644 --- 
a/src/carnot/planner/compiler/graph_comparison.h +++ b/src/carnot/planner/compiler/graph_comparison.h @@ -261,7 +261,7 @@ struct PlanGraphMatcher { } virtual void DescribeTo(::std::ostream* os) const { - *os << "equals to text probobuf: " << expected_plan_.DebugString(); + *os << "equals to text protobuf: " << expected_plan_.DebugString(); } virtual void DescribeNegationTo(::std::ostream* os) const { diff --git a/src/carnot/planner/compiler/test_utils.h b/src/carnot/planner/compiler/test_utils.h index 2f65f616b50..616fb8593d8 100644 --- a/src/carnot/planner/compiler/test_utils.h +++ b/src/carnot/planner/compiler/test_utils.h @@ -768,6 +768,14 @@ class OperatorTests : public ::testing::Test { types::DataType::FLOAT64, types::DataType::FLOAT64}), std::vector({"count", "cpu0", "cpu1", "cpu2"})); } + // Used for testing propagation of context to children. + table_store::schema::Relation MakeRelationWithMutation() { + std::optional mutation = "mutation"; + return table_store::schema::Relation( + std::vector({types::DataType::INT64, types::DataType::FLOAT64, + types::DataType::FLOAT64, types::DataType::FLOAT64}), + std::vector({"count", "cpu0", "cpu1", "cpu2"}), mutation); + } // Same as MakeRelation, but has a time column. 
table_store::schema::Relation MakeTimeRelation() { return table_store::schema::Relation( diff --git a/src/carnot/planner/distributed/coordinator/coordinator.cc b/src/carnot/planner/distributed/coordinator/coordinator.cc index b437bdf8c37..ef468fbe130 100644 --- a/src/carnot/planner/distributed/coordinator/coordinator.cc +++ b/src/carnot/planner/distributed/coordinator/coordinator.cc @@ -194,8 +194,15 @@ StatusOr> CoordinatorImpl::CoordinateImpl(const remote_carnot->AddPlan(remote_plan); distributed_plan->AddPlan(std::move(remote_plan_uptr)); + auto remote_agent_id = remote_carnot->carnot_info().agent_id(); std::vector source_node_ids; for (const auto& [i, data_store_info] : Enumerate(data_store_nodes_)) { + auto agent_id = data_store_info.agent_id(); + // For cases where the remote agent also has a data store, we don't need to add a source. + // This ensures that the MemorySource will be executed locally without an unnecessary GRPCSink/Source pair. + if (agent_id == remote_agent_id) { + continue; + } PX_ASSIGN_OR_RETURN(int64_t source_node_id, distributed_plan->AddCarnot(data_store_info)); distributed_plan->AddEdge(source_node_id, remote_node_id); source_node_ids.push_back(source_node_id); diff --git a/src/carnot/planner/distributed/coordinator/coordinator_test.cc b/src/carnot/planner/distributed/coordinator/coordinator_test.cc index e864338b88b..b6466be90fe 100644 --- a/src/carnot/planner/distributed/coordinator/coordinator_test.cc +++ b/src/carnot/planner/distributed/coordinator/coordinator_test.cc @@ -62,6 +62,16 @@ class CoordinatorTest : public testutils::DistributedRulesTest { ASSERT_OK(rule.Execute(graph.get())); } + void MakeGraphWithMutation() { + auto mem_src = MakeMemSource(MakeRelationWithMutation()); + compiler_state_->relation_map()->emplace("table", MakeRelationWithMutation()); + graph->RecordMutationId({"mutation"}); + MakeMemSink(mem_src, "out"); + + ResolveTypesRule rule(compiler_state_.get()); + ASSERT_OK(rule.Execute(graph.get())); + } + void 
VerifyHasDataSourcePlan(IR* plan) { auto mem_src_nodes = plan->FindNodesOfType(IRNodeType::kMemorySource); ASSERT_EQ(mem_src_nodes.size(), 1); @@ -144,6 +154,48 @@ TEST_F(CoordinatorTest, three_pems_one_kelvin) { } } +// TODO(ddelnano): Finish this test +TEST_F(CoordinatorTest, three_pems_one_kelvin_with_mut) { + auto ps = LoadDistributedStatePb(kThreePEMsOneKelvinDistributedState); + auto coordinator = Coordinator::Create(compiler_state_.get(), ps).ConsumeValueOrDie(); + + MakeGraphWithMutation(); + auto physical_plan = coordinator->Coordinate(graph.get()).ConsumeValueOrDie(); + + auto topo_sort = physical_plan->dag().TopologicalSort(); + // Last item should be kelvin, id 0. + ASSERT_EQ(topo_sort.size(), 4); + ASSERT_EQ(topo_sort[3], 0); + + auto kelvin_instance = physical_plan->Get(0); + EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); + { + SCOPED_TRACE("three pems one kelvin -> " + + kelvin_instance->carnot_info().query_broker_address()); + VerifyKelvinMergerPlan(kelvin_instance->plan()); + } + + // Agents are 1,2,3. + for (int64_t i = 1; i <= 3; ++i) { + auto pem_instance = physical_plan->Get(i); + SCOPED_TRACE("three pems one kelvin -> " + pem_instance->carnot_info().query_broker_address()); + EXPECT_THAT(pem_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); + auto plan = pem_instance->plan(); + VerifyPEMPlan(plan); + + auto grpc_sink = plan->FindNodesOfType(IRNodeType::kGRPCSink); + + EXPECT_EQ(1, grpc_sink.size()); + planpb::Operator op; + auto grpc_sink_ir = static_cast(grpc_sink[0]); + // This unit test doesn't trigger the UpdateSink/AddDestinationIDMap code path, so trigger + // manually so the internal GRPC sink ToProto function works. 
+ grpc_sink_ir->AddDestinationIDMap(0, i); + EXPECT_OK(grpc_sink_ir->ToProto(&op, i)); + EXPECT_EQ(1, op.context().size()); + } +} + TEST_F(CoordinatorTest, one_pem_three_kelvin) { auto ps = LoadDistributedStatePb(kOnePEMThreeKelvinsDistributedState); auto coordinator = Coordinator::Create(compiler_state_.get(), ps).ConsumeValueOrDie(); @@ -157,14 +209,39 @@ TEST_F(CoordinatorTest, one_pem_three_kelvin) { auto kelvin_instance = physical_plan->Get(0); EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); { - SCOPED_TRACE("one pem one kelvin -> kelvin plan"); + SCOPED_TRACE("one pem three kelvin -> kelvin plan"); + VerifyKelvinMergerPlan(kelvin_instance->plan()); + } + + auto pem_instance = physical_plan->Get(1); + EXPECT_THAT(pem_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); + { + SCOPED_TRACE("one pem three kelvin -> pem plan"); + VerifyPEMPlan(pem_instance->plan()); + } +} + +TEST_F(CoordinatorTest, three_pem_one_kelvin_all_has_data_store) { + auto ps = LoadDistributedStatePb(testutils::kThreePEMsOneKelvinAllHasDataStoreDistributedState); + auto coordinator = Coordinator::Create(compiler_state_.get(), ps).ConsumeValueOrDie(); + + MakeGraph(); + + auto physical_plan = coordinator->Coordinate(graph.get()).ConsumeValueOrDie(); + ASSERT_EQ(physical_plan->dag().nodes().size(), 5UL); + /* EXPECT_THAT(physical_plan->dag().TopologicalSort(), ElementsAre(3, 1, 2, 4, 0)); */ + + auto kelvin_instance = physical_plan->Get(0); + EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); + { + SCOPED_TRACE("one pem three kelvin -> kelvin plan"); VerifyKelvinMergerPlan(kelvin_instance->plan()); } auto pem_instance = physical_plan->Get(1); EXPECT_THAT(pem_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); { - SCOPED_TRACE("one pem one kelvin -> pem plan"); + SCOPED_TRACE("one pem three kelvin -> pem plan"); VerifyPEMPlan(pem_instance->plan()); } } diff --git 
a/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc b/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc index 1af0e858da8..3b5b2f85dc1 100644 --- a/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc +++ b/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc @@ -73,8 +73,7 @@ StatusOr PruneUnavailableSourcesRule::MaybePruneMemorySource(MemorySourceI } bool PruneUnavailableSourcesRule::AgentSupportsMemorySources() { - return carnot_info_.has_data_store() && !carnot_info_.has_grpc_server() && - carnot_info_.processes_data(); + return carnot_info_.has_data_store() && carnot_info_.processes_data(); } bool PruneUnavailableSourcesRule::AgentHasTable(std::string table_name) { diff --git a/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc b/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc index 7fe66c7da83..2226005fabe 100644 --- a/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc +++ b/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc @@ -50,6 +50,10 @@ StatusOr DistributedPlan::ToProto() const { dest->set_grpc_address(exec_complete_address_); dest->set_ssl_targetname(exec_complete_ssl_targetname_); } + if (qb_address_to_plan_pb->find(carnot->QueryBrokerAddress()) != + qb_address_to_plan_pb->end()) { + return error::Internal(absl::Substitute("Distributed plan has multiple nodes with the '$0' query broker address.", carnot->QueryBrokerAddress())); + } (*qb_address_to_plan_pb)[carnot->QueryBrokerAddress()] = plan_proto; (*qb_address_to_dag_id_pb)[carnot->QueryBrokerAddress()] = i; diff --git a/src/carnot/planner/distributed/distributed_planner_test.cc b/src/carnot/planner/distributed/distributed_planner_test.cc index 28fee3533a3..fa4b0a8d0b7 100644 --- a/src/carnot/planner/distributed/distributed_planner_test.cc +++ b/src/carnot/planner/distributed/distributed_planner_test.cc @@ -213,6 +213,78 @@ 
TEST_F(DistributedPlannerTest, three_agents_one_kelvin) { EXPECT_THAT(grpc_sink_destinations, UnorderedElementsAreArray(grpc_source_ids)); } +TEST_F(DistributedPlannerTest, three_agents_with_participating_kelvin) { + auto mem_src = MakeMemSource(MakeRelation()); + compiler_state_->relation_map()->emplace("table", MakeRelation()); + MakeMemSink(mem_src, "out"); + + ResolveTypesRule rule(compiler_state_.get()); + ASSERT_OK(rule.Execute(graph.get())); + + distributedpb::DistributedState ps_pb = + LoadDistributedStatePb(testutils::kThreePEMsOneKelvinAllHasDataStoreDistributedState); + std::unique_ptr physical_planner = + DistributedPlanner::Create().ConsumeValueOrDie(); + std::unique_ptr physical_plan = + physical_planner->Plan(ps_pb, compiler_state_.get(), graph.get()).ConsumeValueOrDie(); + + ASSERT_OK(physical_plan->ToProto()); + auto topo_sort = physical_plan->dag().TopologicalSort(); + // Last item should be kelvin, id 0. + ASSERT_EQ(topo_sort.size(), 4); + ASSERT_EQ(topo_sort[3], 0); + + std::vector grpc_sink_destinations; + absl::flat_hash_set seen_plans; + for (int64_t i = 1; i <= 3; ++i) { + SCOPED_TRACE(absl::Substitute("agent id = $0", i)); + auto agent_instance = physical_plan->Get(i); + if (i != 4) { + EXPECT_THAT(agent_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); + } else { + EXPECT_THAT(agent_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); + } + + if (seen_plans.contains(agent_instance->plan())) { + continue; + } + + seen_plans.insert(agent_instance->plan()); + std::vector grpc_sinks = + agent_instance->plan()->FindNodesOfType(IRNodeType::kGRPCSink); + ASSERT_EQ(grpc_sinks.size(), 1); + auto grpc_sink = static_cast(grpc_sinks[0]); + for (const auto& [agent_id, dest_id] : grpc_sink->agent_id_to_destination_id()) { + grpc_sink_destinations.push_back(dest_id); + } + } + + auto kelvin_instance = physical_plan->Get(0); + EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), 
ContainsRegex("kelvin")); + + std::vector unions = kelvin_instance->plan()->FindNodesOfType(IRNodeType::kUnion); + ASSERT_EQ(unions.size(), 1); + UnionIR* kelvin_union = static_cast(unions[0]); + ASSERT_EQ(kelvin_union->parents().size(), 4); + + std::vector grpc_source_ids; + std::vector memory_source_ids; + for (OperatorIR* union_parent : kelvin_union->parents()) { + if (union_parent->type() == IRNodeType::kGRPCSource) { + auto grpc_source = static_cast(union_parent); + grpc_source_ids.push_back(grpc_source->id()); + } else { + ASSERT_EQ(union_parent->type(), IRNodeType::kMemorySource); + memory_source_ids.push_back(union_parent->id()); + } + } + ASSERT_EQ(grpc_source_ids.size(), 3); + ASSERT_EQ(memory_source_ids.size(), 1); + + // Make sure that the destinations are setup properly. + EXPECT_THAT(grpc_sink_destinations, UnorderedElementsAreArray(grpc_source_ids)); +} + using DistributedPlannerUDTFTests = DistributedRulesTest; TEST_F(DistributedPlannerUDTFTests, UDTFOnlyOnPEMsDoesntRunOnKelvin) { uint32_t asid = 123; diff --git a/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc b/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc index 49879679256..34962fb8c9b 100644 --- a/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc +++ b/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc @@ -298,6 +298,64 @@ TEST_F(StitcherTest, three_pems_one_kelvin) { } } +TEST_F(StitcherTest, three_pems_with_participating_kelvin) { + auto ps = LoadDistributedStatePb(testutils::kThreePEMsOneKelvinAllHasDataStoreDistributedState); + auto physical_plan = MakeDistributedPlan(ps); + auto topo_sort = physical_plan->dag().TopologicalSort(); + ASSERT_EQ(topo_sort.size(), 5); + ASSERT_EQ(topo_sort[4], 0); + + CarnotInstance* kelvin = physical_plan->Get(0); + std::string kelvin_qb_address = "kelvin"; + ASSERT_EQ(kelvin->carnot_info().query_broker_address(), kelvin_qb_address); + + std::vector data_sources; + for (int64_t agent_id = 1; 
agent_id <= 4; ++agent_id) { + CarnotInstance* agent = physical_plan->Get(agent_id); + // Quick check to make sure agents are valid. + ASSERT_THAT(agent->carnot_info().query_broker_address(), HasSubstr("pem")); + data_sources.push_back(agent); + } + // Kelvin can be a data source sometimes. + data_sources.push_back(kelvin); + { + SCOPED_TRACE("three_pems_with_participating_kelvin"); + TestBeforeSetSourceGroupGRPCAddress(data_sources, {kelvin}); + } + + // Execute the address rule. + DistributedSetSourceGroupGRPCAddressRule rule; + auto node_changed_or_s = rule.Execute(physical_plan.get()); + ASSERT_OK(node_changed_or_s); + ASSERT_TRUE(node_changed_or_s.ConsumeValueOrDie()); + + { + SCOPED_TRACE("three_pems_with_participating_kelvin"); + TestGRPCAddressSet({kelvin}); + } + + // Associate the edges of the graph. + AssociateDistributedPlanEdgesRule distributed_edges_rule; + node_changed_or_s = distributed_edges_rule.Execute(physical_plan.get()); + ASSERT_OK(node_changed_or_s); + ASSERT_TRUE(node_changed_or_s.ConsumeValueOrDie()); + + { + SCOPED_TRACE("three_pems_with_participating_kelvin"); + TestGRPCBridgesWiring(data_sources, {kelvin}); + } + + DistributedIRRule distributed_grpc_source_conv_rule; + node_changed_or_s = distributed_grpc_source_conv_rule.Execute(physical_plan.get()); + ASSERT_OK(node_changed_or_s); + ASSERT_TRUE(node_changed_or_s.ConsumeValueOrDie()); + + { + SCOPED_TRACE("three_pems_with_participating_kelvin"); + TestGRPCBridgesExpandedCorrectly(data_sources, {kelvin}); + } +} + // Test to see whether we can stitch a graph to itself. TEST_F(StitcherTest, stitch_self_together_with_udtf) { auto ps = LoadDistributedStatePb(kOnePEMOneKelvinDistributedState); @@ -339,7 +397,7 @@ TEST_F(StitcherTest, stitch_self_together_with_udtf) { } // Test to see whether we can stitch a graph to itself. 
-TEST_F(StitcherTest, stitch_all_togther_with_udtf) { +TEST_F(StitcherTest, stitch_all_together_with_udtf) { auto ps = LoadDistributedStatePb(kOnePEMOneKelvinDistributedState); // px._Test_MDState() is an all agent so it should run on every pem and kelvin. auto physical_plan = CoordinateQuery("import px\npx.display(px._Test_MD_State())", ps); @@ -381,6 +439,8 @@ TEST_F(StitcherTest, stitch_all_togther_with_udtf) { // connected. auto kelvin_plan = kelvin->plan(); auto pem_plan = pem->plan(); + LOG(INFO) << "Kelvin plan: " << kelvin_plan->DebugString(); + LOG(INFO) << "PEM plan: " << pem_plan->DebugString(); auto kelvin_grpc_sinks = kelvin_plan->FindNodesThatMatch(InternalGRPCSink()); ASSERT_EQ(kelvin_grpc_sinks.size(), 1); diff --git a/src/carnot/planner/distributed/splitter/splitter.h b/src/carnot/planner/distributed/splitter/splitter.h index 5ba2a997dc3..42227c1a705 100644 --- a/src/carnot/planner/distributed/splitter/splitter.h +++ b/src/carnot/planner/distributed/splitter/splitter.h @@ -54,7 +54,7 @@ struct BlockingSplitPlan { std::unique_ptr before_blocking; // The plan that occcurs after and including blocking nodes. std::unique_ptr after_blocking; - // The that has both the before and after blocking nodes. + // The plan that has both the before and after blocking nodes. std::unique_ptr original_plan; }; diff --git a/src/carnot/planner/file_source/BUILD.bazel b/src/carnot/planner/file_source/BUILD.bazel new file mode 100644 index 00000000000..2d00258245f --- /dev/null +++ b/src/carnot/planner/file_source/BUILD.bazel @@ -0,0 +1,52 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +load("//bazel:pl_build_system.bzl", "pl_cc_binary", "pl_cc_library", "pl_cc_test") + +package(default_visibility = [ + "//src/carnot:__subpackages__", + "//src/experimental/standalone_pem:__subpackages__", # TODO(ddelnano): Is this needed? +]) + +pl_cc_library( + name = "cc_library", + srcs = glob( + [ + "*.cc", + "*.h", + ], + exclude = [ + "**/*_test.cc", + "**/*_test_utils.h", + ], + ), + hdrs = ["file_source.h"], + deps = [ + "//src/carnot/planner/objects:cc_library", + "//src/carnot/planner/probes:cc_library", + "//src/common/uuid:cc_library", # TODO(ddelnano): This may not be needed + ], +) + +pl_cc_test( + name = "file_source_test", + srcs = ["file_source_test.cc"], + deps = [ + ":cc_library", + "//src/carnot/planner:test_utils", + "//src/carnot/planner/compiler:cc_library", + ], +) diff --git a/src/carnot/planner/file_source/file_source.cc b/src/carnot/planner/file_source/file_source.cc new file mode 100644 index 00000000000..4e7c0e88a96 --- /dev/null +++ b/src/carnot/planner/file_source/file_source.cc @@ -0,0 +1,27 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/planner/file_source/file_source.h" + +namespace px { +namespace carnot { +namespace planner { +namespace compiler {} // namespace compiler +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/file_source/file_source.h b/src/carnot/planner/file_source/file_source.h new file mode 100644 index 00000000000..e15c1f734ac --- /dev/null +++ b/src/carnot/planner/file_source/file_source.h @@ -0,0 +1,37 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "src/carnot/planner/objects/funcobject.h" + +namespace px { +namespace carnot { +namespace planner { +namespace compiler { + +class FileSourceIR { + /* public: */ + + /* private: */ +}; + +} // namespace compiler +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/file_source/file_source_test.cc b/src/carnot/planner/file_source/file_source_test.cc new file mode 100644 index 00000000000..1105a3b26d6 --- /dev/null +++ b/src/carnot/planner/file_source/file_source_test.cc @@ -0,0 +1,91 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/planner/compiler/ast_visitor.h" +#include "src/carnot/planner/compiler/test_utils.h" +#include "src/carnot/planner/probes/probes.h" + +namespace px { +namespace carnot { +namespace planner { +namespace compiler { +using ::testing::ContainsRegex; +using ::testing::Not; +using ::testing::UnorderedElementsAre; + +constexpr char kSingleFileSource[] = R"pxl( +import pxlog + +glob_pattern = 'test.json' +pxlog.FileSource(glob_pattern, 'test_table', '5m') +)pxl"; + +constexpr char kSingleFileSourceProgramPb[] = R"pxl( +glob_pattern: "test.json" +table_name: "test_table" +ttl { + seconds: 300 +} +)pxl"; + +class FileSourceCompilerTest : public ASTVisitorTest { + protected: + StatusOr> CompileFileSourceScript( + std::string_view query, const ExecFuncs& exec_funcs = {}) { + absl::flat_hash_set reserved_names; + for (const auto& func : exec_funcs) { + reserved_names.insert(func.output_table_prefix()); + } + auto func_based_exec = exec_funcs.size() > 0; + + Parser parser; + PX_ASSIGN_OR_RETURN(auto ast, parser.Parse(query)); + + std::shared_ptr ir = std::make_shared(); + std::shared_ptr mutation_ir = std::make_shared(); + + ModuleHandler module_handler; + PX_ASSIGN_OR_RETURN(auto ast_walker, compiler::ASTVisitorImpl::Create( + ir.get(), mutation_ir.get(), compiler_state_.get(), + &module_handler, func_based_exec, reserved_names, {})); + + PX_RETURN_IF_ERROR(ast_walker->ProcessModuleNode(ast)); + if (func_based_exec) { + PX_RETURN_IF_ERROR(ast_walker->ProcessExecFuncs(exec_funcs)); + } + return mutation_ir; + } +}; + +// TODO(ddelnano): Add test that verifies missing arguments provides a compiler error +// instead of the "Query should not be empty" error. There seems to be a bug where default +// arguments are not being handled correctly. 
+ +TEST_F(FileSourceCompilerTest, parse_single_file_source) { + ASSERT_OK_AND_ASSIGN(auto mutation_ir, CompileFileSourceScript(kSingleFileSource)); + plannerpb::CompileMutationsResponse pb; + EXPECT_OK(mutation_ir->ToProto(&pb)); + ASSERT_EQ(pb.mutations_size(), 1); + EXPECT_THAT(pb.mutations()[0].file_source(), + testing::proto::EqualsProto(kSingleFileSourceProgramPb)); +} + +} // namespace compiler +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/file_source/ir/BUILD.bazel b/src/carnot/planner/file_source/ir/BUILD.bazel new file mode 100644 index 00000000000..759282f6c38 --- /dev/null +++ b/src/carnot/planner/file_source/ir/BUILD.bazel @@ -0,0 +1,41 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("//bazel:proto_compile.bzl", "pl_cc_proto_library", "pl_go_proto_library", "pl_proto_library") + +package(default_visibility = ["//src:__subpackages__"]) + +pl_proto_library( + name = "logical_pl_proto", + srcs = ["logical.proto"], + deps = [ + "@gogo_grpc_proto//gogoproto:gogo_pl_proto", + ], +) + +pl_cc_proto_library( + name = "logical_pl_cc_proto", + proto = ":logical_pl_proto", + deps = [ + "@gogo_grpc_proto//gogoproto:gogo_pl_cc_proto", + ], +) + +pl_go_proto_library( + name = "logical_pl_go_proto", + importpath = "px.dev/pixie/src/carnot/planner/file_source/ir", + proto = ":logical_pl_proto", +) diff --git a/src/carnot/planner/file_source/ir/logical.pb.go b/src/carnot/planner/file_source/ir/logical.pb.go new file mode 100755 index 00000000000..f424f8ec525 --- /dev/null +++ b/src/carnot/planner/file_source/ir/logical.pb.go @@ -0,0 +1,567 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: src/carnot/planner/file_source/ir/logical.proto + +package ir + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FileSourceDeployment struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GlobPattern string `protobuf:"bytes,2,opt,name=glob_pattern,json=globPattern,proto3" json:"glob_pattern,omitempty"` + TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + TTL *types.Duration `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (m *FileSourceDeployment) Reset() { *m = FileSourceDeployment{} } +func (*FileSourceDeployment) ProtoMessage() {} +func (*FileSourceDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_452b4826b1190f86, []int{0} +} +func (m *FileSourceDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileSourceDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FileSourceDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FileSourceDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSourceDeployment.Merge(m, src) +} +func (m *FileSourceDeployment) XXX_Size() int { + return m.Size() +} +func (m *FileSourceDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_FileSourceDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSourceDeployment proto.InternalMessageInfo + +func (m *FileSourceDeployment) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FileSourceDeployment) GetGlobPattern() string { + if m != nil { + return m.GlobPattern + } + return "" +} + +func (m *FileSourceDeployment) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *FileSourceDeployment) GetTTL() *types.Duration { + if m != nil { + return m.TTL + } + return nil +} + +func init() { 
+ proto.RegisterType((*FileSourceDeployment)(nil), "px.carnot.planner.file_source.ir.FileSourceDeployment") +} + +func init() { + proto.RegisterFile("src/carnot/planner/file_source/ir/logical.proto", fileDescriptor_452b4826b1190f86) +} + +var fileDescriptor_452b4826b1190f86 = []byte{ + // 302 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0x42, 0x31, + 0x18, 0x85, 0x6f, 0x81, 0x68, 0x28, 0x4e, 0x37, 0x0c, 0x48, 0xe2, 0x2f, 0x3a, 0x31, 0xb5, 0x89, + 0x3a, 0x38, 0x13, 0xe2, 0x64, 0x8c, 0x41, 0x26, 0x17, 0xd2, 0x7b, 0x2d, 0x4d, 0x93, 0xd2, 0xff, + 0xa6, 0x94, 0x44, 0x37, 0x1f, 0xc1, 0x67, 0x70, 0xf2, 0x51, 0x1c, 0x19, 0x99, 0x8c, 0xf4, 0x2e, + 0x8e, 0x3c, 0x82, 0xb9, 0xbd, 0x98, 0xb8, 0xfd, 0xff, 0x39, 0xdf, 0x39, 0x39, 0x94, 0x2f, 0x5d, + 0xce, 0x73, 0xe1, 0x2c, 0x7a, 0x5e, 0x18, 0x61, 0xad, 0x74, 0x7c, 0xae, 0x8d, 0x9c, 0x2d, 0x71, + 0xe5, 0x72, 0xc9, 0xb5, 0xe3, 0x06, 0x95, 0xce, 0x85, 0x61, 0x85, 0x43, 0x8f, 0xe9, 0xa0, 0x78, + 0x66, 0x35, 0xcf, 0xf6, 0x3c, 0xfb, 0xc7, 0x33, 0xed, 0xfa, 0x5d, 0x85, 0x0a, 0x23, 0xcc, 0xab, + 0xab, 0xce, 0xf5, 0x41, 0x21, 0x2a, 0x23, 0x79, 0xfc, 0xb2, 0xd5, 0x9c, 0x3f, 0xad, 0x9c, 0xf0, + 0x1a, 0x6d, 0xed, 0x9f, 0xbf, 0x13, 0xda, 0xbd, 0xd1, 0x46, 0x3e, 0xc4, 0x9e, 0xb1, 0x2c, 0x0c, + 0xbe, 0x2c, 0xa4, 0xf5, 0x69, 0x4a, 0x5b, 0x56, 0x2c, 0x64, 0x8f, 0x0c, 0xc8, 0xb0, 0x3d, 0x89, + 0x77, 0x7a, 0x46, 0x8f, 0x94, 0xc1, 0x6c, 0x56, 0x08, 0xef, 0xa5, 0xb3, 0xbd, 0x46, 0xf4, 0x3a, + 0x95, 0x76, 0x5f, 0x4b, 0xe9, 0x09, 0xa5, 0x5e, 0x64, 0x46, 0xce, 0x62, 0xb8, 0x19, 0x81, 0x76, + 0x54, 0xee, 0xaa, 0x86, 0x2b, 0xda, 0xf4, 0xde, 0xf4, 0x5a, 0x03, 0x32, 0xec, 0x5c, 0x1c, 0xb3, + 0x7a, 0x1c, 0xfb, 0x1b, 0xc7, 0xc6, 0xfb, 0x71, 0xa3, 0xc3, 0xf0, 0x75, 0xda, 0x9c, 0x4e, 0x6f, + 0x27, 0x15, 0x3e, 0xba, 0x5e, 0x6f, 0x21, 0xd9, 0x6c, 0x21, 0xd9, 0x6d, 0x81, 0xbc, 0x06, 0x20, + 0x1f, 0x01, 0xc8, 0x67, 0x00, 0xb2, 0x0e, 0x40, 0xbe, 0x03, 0x90, 0x9f, 0x00, 0xc9, 0x2e, 0x00, 
+ 0x79, 0x2b, 0x21, 0x59, 0x97, 0x90, 0x6c, 0x4a, 0x48, 0x1e, 0x1b, 0xda, 0x65, 0x07, 0xb1, 0xfa, + 0xf2, 0x37, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x07, 0x40, 0x1c, 0x70, 0x01, 0x00, 0x00, +} + +func (this *FileSourceDeployment) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FileSourceDeployment) + if !ok { + that2, ok := that.(FileSourceDeployment) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.GlobPattern != that1.GlobPattern { + return false + } + if this.TableName != that1.TableName { + return false + } + if !this.TTL.Equal(that1.TTL) { + return false + } + return true +} +func (this *FileSourceDeployment) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&ir.FileSourceDeployment{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "GlobPattern: "+fmt.Sprintf("%#v", this.GlobPattern)+",\n") + s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") + if this.TTL != nil { + s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringLogical(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FileSourceDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileSourceDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileSourceDeployment) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TTL != nil { + { + size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLogical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarintLogical(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0x1a + } + if len(m.GlobPattern) > 0 { + i -= len(m.GlobPattern) + copy(dAtA[i:], m.GlobPattern) + i = encodeVarintLogical(dAtA, i, uint64(len(m.GlobPattern))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintLogical(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintLogical(dAtA []byte, offset int, v uint64) int { + offset -= sovLogical(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FileSourceDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovLogical(uint64(l)) + } + l = len(m.GlobPattern) + if l > 0 { + n += 1 + l + sovLogical(uint64(l)) + } + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovLogical(uint64(l)) + } + if m.TTL != nil { + l = m.TTL.Size() + n += 1 + l + sovLogical(uint64(l)) + } + return n +} + +func sovLogical(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLogical(x uint64) (n int) { + return sovLogical(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FileSourceDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceDeployment{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GlobPattern:` + fmt.Sprintf("%v", this.GlobPattern) + `,`, + `TableName:` + fmt.Sprintf("%v", this.TableName) + `,`, + `TTL:` + strings.Replace(fmt.Sprintf("%v", 
this.TTL), "Duration", "types.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringLogical(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FileSourceDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileSourceDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileSourceDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobPattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobPattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TTL == nil { + m.TTL = &types.Duration{} + } + if err := m.TTL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogical(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLogical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogical(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLogical + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLogical + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLogical + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLogical = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogical = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLogical = fmt.Errorf("proto: unexpected end of group") +) diff --git a/src/carnot/planner/file_source/ir/logical.proto b/src/carnot/planner/file_source/ir/logical.proto new file mode 100644 index 00000000000..7b64203c214 --- /dev/null +++ b/src/carnot/planner/file_source/ir/logical.proto @@ -0,0 +1,39 @@ +/* + * Copyright 2018- The Pixie Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +syntax = "proto3"; + +package px.carnot.planner.file_source.ir; + +option go_package = "ir"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; + +// A logical file source deployment +message FileSourceDeployment { + // For now this is the same as glob_pattern, but in the future may provide a logical name for the + // file source. + string name = 1; + // The glob pattern to use to find files to read. + string glob_pattern = 2; + // The table name to write the data to. + string table_name = 3; + // The ttl to run the file source for. -1 indicates that the file source should run indefinitely. + google.protobuf.Duration ttl = 4 [ (gogoproto.customname) = "TTL" ]; +} diff --git a/src/carnot/planner/file_source/log_module.cc b/src/carnot/planner/file_source/log_module.cc new file mode 100644 index 00000000000..6df5e582311 --- /dev/null +++ b/src/carnot/planner/file_source/log_module.cc @@ -0,0 +1,104 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/planner/file_source/log_module.h" + +namespace px { +namespace carnot { +namespace planner { +namespace compiler { + +class FileSourceHandler { + public: + static StatusOr Eval(MutationsIR* mutations_ir, const pypa::AstPtr& ast, + const ParsedArgs& args, ASTVisitor* visitor); +}; + +class DeleteFileSourceHandler { + public: + static StatusOr Eval(MutationsIR* mutations_ir, const pypa::AstPtr& ast, + const ParsedArgs& args, ASTVisitor* visitor); +}; + +StatusOr> LogModule::Create(MutationsIR* mutations_ir, + ASTVisitor* ast_visitor) { + auto tracing_module = std::shared_ptr(new LogModule(mutations_ir, ast_visitor)); + PX_RETURN_IF_ERROR(tracing_module->Init()); + return tracing_module; +} + +Status LogModule::Init() { + PX_ASSIGN_OR_RETURN( + std::shared_ptr upsert_fn, + FuncObject::Create(kFileSourceID, {"glob_pattern", "table_name", "ttl"}, {}, + /* has_variable_len_args */ false, + /* has_variable_len_kwargs */ false, + std::bind(FileSourceHandler::Eval, mutations_ir_, std::placeholders::_1, + std::placeholders::_2, std::placeholders::_3), + ast_visitor())); + PX_RETURN_IF_ERROR(upsert_fn->SetDocString(kFileSourceDocstring)); + AddMethod(kFileSourceID, upsert_fn); + + PX_ASSIGN_OR_RETURN(std::shared_ptr delete_fn, + FuncObject::Create(kFileSourceID, {"name"}, {}, + /* has_variable_len_args */ false, + /* has_variable_len_kwargs */ false, + std::bind(DeleteFileSourceHandler::Eval, mutations_ir_, + std::placeholders::_1, std::placeholders::_2, + std::placeholders::_3), + 
ast_visitor())); + PX_RETURN_IF_ERROR(upsert_fn->SetDocString(kDeleteFileSourceDocstring)); + AddMethod(kDeleteFileSourceID, delete_fn); + + return Status::OK(); +} + +StatusOr FileSourceHandler::Eval(MutationsIR* mutations_ir, const pypa::AstPtr& ast, + const ParsedArgs& args, ASTVisitor* visitor) { + DCHECK(mutations_ir); + + PX_ASSIGN_OR_RETURN(auto glob_pattern_ir, GetArgAs(ast, args, "glob_pattern")); + PX_ASSIGN_OR_RETURN(auto table_name_ir, GetArgAs(ast, args, "table_name")); + PX_ASSIGN_OR_RETURN(auto ttl_ir, GetArgAs(ast, args, "ttl")); + + const std::string& glob_pattern_str = glob_pattern_ir->str(); + const std::string& table_name_str = table_name_ir->str(); + PX_ASSIGN_OR_RETURN(int64_t ttl_ns, StringToTimeInt(ttl_ir->str())); + + mutations_ir->CreateFileSourceDeployment(glob_pattern_str, table_name_str, ttl_ns); + + return std::static_pointer_cast(std::make_shared(ast, visitor)); +} + +StatusOr DeleteFileSourceHandler::Eval(MutationsIR* mutations_ir, + const pypa::AstPtr& ast, const ParsedArgs& args, + ASTVisitor* visitor) { + DCHECK(mutations_ir); + + PX_ASSIGN_OR_RETURN(auto glob_pattern_ir, GetArgAs(ast, args, "name")); + const std::string& glob_pattern_str = glob_pattern_ir->str(); + + mutations_ir->DeleteFileSource(glob_pattern_str); + + return std::static_pointer_cast(std::make_shared(ast, visitor)); +} + +} // namespace compiler +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/file_source/log_module.h b/src/carnot/planner/file_source/log_module.h new file mode 100644 index 00000000000..5d5520dafa5 --- /dev/null +++ b/src/carnot/planner/file_source/log_module.h @@ -0,0 +1,69 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once +#include +#include +#include +#include + +#include "src/carnot/planner/compiler_state/compiler_state.h" +#include "src/carnot/planner/objects/funcobject.h" +#include "src/carnot/planner/objects/none_object.h" +#include "src/carnot/planner/probes/probes.h" + +namespace px { +namespace carnot { +namespace planner { +namespace compiler { + +class LogModule : public QLObject { + public: + static constexpr TypeDescriptor LogModuleType = { + /* name */ "pxlog", + /* type */ QLObjectType::kLogModule, + }; + static StatusOr> Create(MutationsIR* mutations_ir, + ASTVisitor* ast_visitor); + + // Constant for the modules. 
+ inline static constexpr char kLogModuleObjName[] = "pxlog"; + + inline static constexpr char kFileSourceID[] = "FileSource"; + inline static constexpr char kFileSourceDocstring[] = R"doc( + TBD + )doc"; + + inline static constexpr char kDeleteFileSourceID[] = "DeleteFileSource"; + inline static constexpr char kDeleteFileSourceDocstring[] = R"doc( + TBD + )doc"; + + protected: + explicit LogModule(MutationsIR* mutations_ir, ASTVisitor* ast_visitor) + : QLObject(LogModuleType, ast_visitor), mutations_ir_(mutations_ir) {} + Status Init(); + + private: + MutationsIR* mutations_ir_; +}; + +} // namespace compiler +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/ir/grpc_sink_ir.cc b/src/carnot/planner/ir/grpc_sink_ir.cc index b087d3eaefc..786da032781 100644 --- a/src/carnot/planner/ir/grpc_sink_ir.cc +++ b/src/carnot/planner/ir/grpc_sink_ir.cc @@ -24,6 +24,7 @@ namespace planner { Status GRPCSinkIR::CopyFromNodeImpl(const IRNode* node, absl::flat_hash_map*) { + PX_RETURN_IF_ERROR(SinkOperatorIR::CopyFromNodeImpl(node, nullptr)); const GRPCSinkIR* grpc_sink = static_cast(node); sink_type_ = grpc_sink->sink_type_; destination_id_ = grpc_sink->destination_id_; @@ -35,6 +36,7 @@ Status GRPCSinkIR::CopyFromNodeImpl(const IRNode* node, } Status GRPCSinkIR::ToProto(planpb::Operator* op) const { + PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); CHECK(has_output_table()); auto pb = op->mutable_grpc_sink_op(); op->set_op_type(planpb::GRPC_SINK_OPERATOR); @@ -54,6 +56,7 @@ Status GRPCSinkIR::ToProto(planpb::Operator* op) const { } Status GRPCSinkIR::ToProto(planpb::Operator* op, int64_t agent_id) const { + PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); auto pb = op->mutable_grpc_sink_op(); op->set_op_type(planpb::GRPC_SINK_OPERATOR); pb->set_address(destination_address()); diff --git a/src/carnot/planner/ir/grpc_sink_ir.h b/src/carnot/planner/ir/grpc_sink_ir.h index b8ef691a6f6..9dea6307de3 100644 --- 
a/src/carnot/planner/ir/grpc_sink_ir.h +++ b/src/carnot/planner/ir/grpc_sink_ir.h @@ -43,9 +43,10 @@ namespace planner { * 1. SetDistributedID(string): Set the name of the node same as the query broker. * 2. SetDestinationAddress(string): the GRPC address where batches should be sent. */ -class GRPCSinkIR : public OperatorIR { +class GRPCSinkIR : public SinkOperatorIR { public: - explicit GRPCSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kGRPCSink) {} + explicit GRPCSinkIR(int64_t id, std::string mutation_id) + : SinkOperatorIR(id, IRNodeType::kGRPCSink, mutation_id) {} enum GRPCSinkType { kTypeNotSet = 0, @@ -110,6 +111,17 @@ class GRPCSinkIR : public OperatorIR { destination_ssl_targetname_ = ssl_targetname; } + std::string DebugString() const override { + auto sink_op_str = SinkOperatorIR::DebugString(); + std::vector agent_ids; + for (const auto& [agent_id, _] : agent_id_to_destination_id_) { + agent_ids.push_back(agent_id); + } + return absl::Substitute("$0(id=$1, destination_id=$2, destination_address=$3, sink_type=$4, agent_ids=$5 sink_op=$6)", + type_string(), id(), destination_id_, destination_address_, + sink_type_, absl::StrJoin(agent_ids, ","), sink_op_str); + } + const std::string& destination_address() const { return destination_address_; } bool DestinationAddressSet() const { return destination_address_ != ""; } const std::string& destination_ssl_targetname() const { return destination_ssl_targetname_; } diff --git a/src/carnot/planner/ir/ir.h b/src/carnot/planner/ir/ir.h index faeb0623eea..df5c88aecae 100644 --- a/src/carnot/planner/ir/ir.h +++ b/src/carnot/planner/ir/ir.h @@ -49,6 +49,7 @@ namespace planner { class ExpressionIR; class OperatorIR; +class SinkOperatorIR; /** * IR contains the intermediate representation of the query @@ -77,7 +78,13 @@ class IR { template StatusOr MakeNode(int64_t id, const pypa::AstPtr& ast) { id_node_counter = std::max(id + 1, id_node_counter); - auto node = std::make_unique(id); + std::unique_ptr node; + if 
constexpr (std::is_base_of_v) { + auto mutation_id = mutation_id_.value_or(""); + node = std::make_unique(id, mutation_id); + } else { + node = std::make_unique(id); + } dag_.AddNode(node->id()); node->set_graph(this); if (ast != nullptr) { @@ -123,6 +130,9 @@ class IR { } // Use the source's ID if we are copying in to a different graph. auto new_node_id = this == source->graph() ? id_node_counter : source->id(); + if (this != source->graph()) { + mutation_id_ = source->graph()->mutation_id(); + } DCHECK(!HasNode(new_node_id)) << source->DebugString(); PX_ASSIGN_OR_RETURN(IRNode * new_node, MakeNodeWithType(source->type(), new_node_id)); PX_RETURN_IF_ERROR(new_node->CopyFromNode(source, copied_nodes_map)); @@ -258,6 +268,13 @@ class IR { return nodes; } + void RecordMutationId(std::optional mutation_id) { + DCHECK(!mutation_id_.has_value()) << "Mutation ID should only be set once."; + mutation_id_ = mutation_id; + } + + std::optional mutation_id() const { return mutation_id_; } + friend std::ostream& operator<<(std::ostream& os, const std::shared_ptr&) { return os << "ir"; } @@ -270,6 +287,7 @@ class IR { plan::DAG dag_; std::unordered_map id_node_map_; int64_t id_node_counter = 0; + std::optional mutation_id_ = std::nullopt; }; Status ResolveOperatorType(OperatorIR* op, CompilerState* compiler_state); diff --git a/src/carnot/planner/ir/memory_sink_ir.cc b/src/carnot/planner/ir/memory_sink_ir.cc index 943e165f47a..7e8fffee763 100644 --- a/src/carnot/planner/ir/memory_sink_ir.cc +++ b/src/carnot/planner/ir/memory_sink_ir.cc @@ -31,6 +31,7 @@ Status MemorySinkIR::Init(OperatorIR* parent, const std::string& name, } Status MemorySinkIR::ToProto(planpb::Operator* op) const { + PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); auto pb = op->mutable_mem_sink_op(); pb->set_name(name_); op->set_op_type(planpb::MEMORY_SINK_OPERATOR); @@ -47,6 +48,7 @@ Status MemorySinkIR::ToProto(planpb::Operator* op) const { Status MemorySinkIR::CopyFromNodeImpl(const IRNode* node, 
absl::flat_hash_map*) { + PX_RETURN_IF_ERROR(SinkOperatorIR::CopyFromNodeImpl(node, nullptr)); const MemorySinkIR* sink_ir = static_cast(node); name_ = sink_ir->name_; out_columns_ = sink_ir->out_columns_; diff --git a/src/carnot/planner/ir/memory_sink_ir.h b/src/carnot/planner/ir/memory_sink_ir.h index c43b36698f3..eb50373a41f 100644 --- a/src/carnot/planner/ir/memory_sink_ir.h +++ b/src/carnot/planner/ir/memory_sink_ir.h @@ -38,10 +38,11 @@ namespace planner { /** * The MemorySinkIR describes the MemorySink operator. */ -class MemorySinkIR : public OperatorIR { +class MemorySinkIR : public SinkOperatorIR { public: MemorySinkIR() = delete; - explicit MemorySinkIR(int64_t id) : OperatorIR(id, IRNodeType::kMemorySink) {} + explicit MemorySinkIR(int64_t id, std::string mutation_id) + : SinkOperatorIR(id, IRNodeType::kMemorySink, mutation_id) {} std::string name() const { return name_; } void set_name(const std::string& name) { name_ = name; } diff --git a/src/carnot/planner/ir/memory_source_ir.cc b/src/carnot/planner/ir/memory_source_ir.cc index fc367ce7fc0..18e92dc2107 100644 --- a/src/carnot/planner/ir/memory_source_ir.cc +++ b/src/carnot/planner/ir/memory_source_ir.cc @@ -29,6 +29,7 @@ std::string MemorySourceIR::DebugString() const { } Status MemorySourceIR::ToProto(planpb::Operator* op) const { + PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); auto pb = op->mutable_mem_source_op(); op->set_op_type(planpb::MEMORY_SOURCE_OPERATOR); pb->set_name(table_name_); diff --git a/src/carnot/planner/ir/memory_source_ir.h b/src/carnot/planner/ir/memory_source_ir.h index 757632d2096..2339ab4b27f 100644 --- a/src/carnot/planner/ir/memory_source_ir.h +++ b/src/carnot/planner/ir/memory_source_ir.h @@ -40,10 +40,10 @@ namespace planner { * @brief The MemorySourceIR is a dual logical plan * and IR node operator. 
It inherits from both classes */ -class MemorySourceIR : public OperatorIR { +class MemorySourceIR : public SinkOperatorIR { public: MemorySourceIR() = delete; - explicit MemorySourceIR(int64_t id) : OperatorIR(id, IRNodeType::kMemorySource) {} + explicit MemorySourceIR(int64_t id, std::string mutation_id) : SinkOperatorIR(id, IRNodeType::kMemorySource, mutation_id) {} /** * @brief Initialize the memory source. diff --git a/src/carnot/planner/ir/operator_ir.h b/src/carnot/planner/ir/operator_ir.h index a719432efec..c899679f8bb 100644 --- a/src/carnot/planner/ir/operator_ir.h +++ b/src/carnot/planner/ir/operator_ir.h @@ -181,6 +181,40 @@ class OperatorIR : public IRNode { std::vector parent_types_; bool parent_types_set_ = false; }; + +class SinkOperatorIR : public OperatorIR { + public: + std::string DebugString() const { + return absl::Substitute("$0(id=$1, mutation_id=$2)", type_string(), id(), mutation_id_); + } + + protected: + explicit SinkOperatorIR(int64_t id, IRNodeType type, std::string mutation_id) + : OperatorIR(id, type), mutation_id_(mutation_id) {} + + virtual Status ToProto(planpb::Operator* op) const { + if (mutation_id_.empty()) { + return Status::OK(); + } + auto context = op->mutable_context(); + context->insert({"mutation_id", mutation_id_}); + return Status::OK(); + } + + /** + * @brief Override of CopyFromNode that adds special handling for Operators. 
+ */ + virtual Status CopyFromNodeImpl(const IRNode* node, + absl::flat_hash_map*) { + const SinkOperatorIR* source = static_cast(node); + mutation_id_ = source->mutation_id_; + return Status::OK(); + } + + private: + std::string mutation_id_; +}; + } // namespace planner } // namespace carnot } // namespace px diff --git a/src/carnot/planner/ir/otel_export_sink_ir.cc b/src/carnot/planner/ir/otel_export_sink_ir.cc index 672ca2c5767..defa26e3ee3 100644 --- a/src/carnot/planner/ir/otel_export_sink_ir.cc +++ b/src/carnot/planner/ir/otel_export_sink_ir.cc @@ -18,6 +18,8 @@ #include +#include + #include "src/carnot/planner/ir/ir.h" #include "src/carnot/planner/ir/otel_export_sink_ir.h" #include "src/carnot/planpb/plan.pb.h" @@ -160,10 +162,34 @@ Status OTelExportSinkIR::ProcessConfig(const OTelData& data) { new_span.span_kind = span.span_kind; data_.spans.push_back(std::move(new_span)); } + for (const auto& log : data.logs) { + OTelLog new_log; + + PX_ASSIGN_OR_RETURN(new_log.time_column, AddColumn(log.time_column)); + PX_ASSIGN_OR_RETURN(new_log.body_column, AddColumn(log.body_column)); + if (log.observed_time_column != nullptr) { + PX_ASSIGN_OR_RETURN(new_log.observed_time_column, AddColumn(log.observed_time_column)); + } + + new_log.severity_text = log.severity_text; + new_log.severity_number = log.severity_number; + + for (const auto& attr : log.attributes) { + if (attr.column_reference == nullptr) { + new_log.attributes.push_back({attr.name, nullptr, attr.string_value}); + continue; + } + PX_ASSIGN_OR_RETURN(auto column, AddColumn(attr.column_reference)); + new_log.attributes.push_back({attr.name, column, ""}); + } + + data_.logs.push_back(std::move(new_log)); + } return Status::OK(); } Status OTelExportSinkIR::ToProto(planpb::Operator* op) const { + PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); op->set_op_type(planpb::OTEL_EXPORT_SINK_OPERATOR); auto otel_op = op->mutable_otel_sink_op(); *otel_op->mutable_endpoint_config() = data_.endpoint_config; @@ -330,11 
+356,56 @@ Status OTelExportSinkIR::ToProto(planpb::Operator* op) const { } span_pb->set_kind_value(span.span_kind); } + for (const auto& log : data_.logs) { + auto log_pb = otel_op->add_logs(); + + if (log.time_column->EvaluatedDataType() != types::TIME64NS) { + return log.time_column->CreateIRNodeError( + "Expected time column '$0' to be TIME64NS, received $1", log.time_column->col_name(), + types::ToString(log.time_column->EvaluatedDataType())); + } + PX_ASSIGN_OR_RETURN(auto time_column_index, + log.time_column->GetColumnIndex()); + log_pb->set_time_column_index(time_column_index); + + if (log.observed_time_column != nullptr) { + if (log.observed_time_column->EvaluatedDataType() != types::TIME64NS) { + return log.observed_time_column->CreateIRNodeError( + "Expected observed_time column '$0' to be TIME64NS, received $1", log.observed_time_column->col_name(), + types::ToString(log.observed_time_column->EvaluatedDataType())); + } + PX_ASSIGN_OR_RETURN(auto observed_time_column_index, + log.observed_time_column->GetColumnIndex()); + log_pb->set_observed_time_column_index(observed_time_column_index); + } else { + log_pb->set_observed_time_column_index(-1); + } + + log_pb->set_severity_text(log.severity_text); + + // TODO(ddelnano): Add validation for severity_number if the planner isn't the right + // place to implement the validation. 
+ log_pb->set_severity_number(log.severity_number); + + if (log.body_column->EvaluatedDataType() != types::STRING) { + return log.body_column->CreateIRNodeError( + "Expected body column '$0' to be STRING, received $1", log.body_column->col_name(), + types::ToString(log.body_column->EvaluatedDataType())); + } + PX_ASSIGN_OR_RETURN(auto body_column_index, + log.body_column->GetColumnIndex()); + log_pb->set_body_column_index(body_column_index); + + for (const auto& attribute : log.attributes) { + PX_RETURN_IF_ERROR(attribute.ToProto(log_pb->add_attributes())); + } + } return Status::OK(); } Status OTelExportSinkIR::CopyFromNodeImpl(const IRNode* node, absl::flat_hash_map*) { + PX_RETURN_IF_ERROR(SinkOperatorIR::CopyFromNodeImpl(node, nullptr)); const OTelExportSinkIR* source = static_cast(node); return ProcessConfig(source->data_); } diff --git a/src/carnot/planner/ir/otel_export_sink_ir.h b/src/carnot/planner/ir/otel_export_sink_ir.h index 2caad972498..cced5ada202 100644 --- a/src/carnot/planner/ir/otel_export_sink_ir.h +++ b/src/carnot/planner/ir/otel_export_sink_ir.h @@ -127,11 +127,23 @@ struct OTelSpan { int64_t span_kind; }; +struct OTelLog { + std::vector attributes; + + ColumnIR* time_column; + ColumnIR* observed_time_column = nullptr; + ColumnIR* body_column; + + int64_t severity_number; + std::string severity_text; +}; + struct OTelData { planpb::OTelEndpointConfig endpoint_config; std::vector resource_attributes; std::vector metrics; std::vector spans; + std::vector logs; }; /** @@ -139,9 +151,10 @@ struct OTelData { * Represents a configuration to transform a DataFrame into OpenTelemetry * data. 
*/ -class OTelExportSinkIR : public OperatorIR { +class OTelExportSinkIR : public SinkOperatorIR { public: - explicit OTelExportSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kOTelExportSink) {} + explicit OTelExportSinkIR(int64_t id, std::string mutation_id) + : SinkOperatorIR(id, IRNodeType::kOTelExportSink, mutation_id) {} Status Init(OperatorIR* parent, const OTelData& data) { PX_RETURN_IF_ERROR(ProcessConfig(data)); diff --git a/src/carnot/planner/ir/otel_export_sink_ir_test.cc b/src/carnot/planner/ir/otel_export_sink_ir_test.cc index b508b2d8afb..e70abb21637 100644 --- a/src/carnot/planner/ir/otel_export_sink_ir_test.cc +++ b/src/carnot/planner/ir/otel_export_sink_ir_test.cc @@ -443,6 +443,85 @@ INSTANTIATE_TEST_SUITE_P( .ConsumeValueOrDie(); }, }, + { + "logs_basic", + table_store::schema::Relation{ + {types::TIME64NS, types::STRING, types::STRING}, + {"start_time", "attribute_str", "log_message"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE}}, + R"pb( + endpoint_config {} + resource {} + logs { + attributes { + name: "service.name" + column { + column_type: STRING + column_index: 1 + } + } + time_column_index: 0 + observed_time_column_index: -1 + severity_number: 4 + severity_text: "INFO" + body_column_index: 2 + } + )pb", + [](IR* graph, OperatorIR* parent, table_store::schema::Relation* relation) { + OTelData data; + + auto& log = data.logs.emplace_back(); + log.time_column = CreateTypedColumn(graph, "start_time", relation); + log.attributes.push_back( + {"service.name", CreateTypedColumn(graph, "attribute_str", relation), ""}); + log.severity_number = 4; + log.severity_text = "INFO"; + log.body_column = CreateTypedColumn(graph, "log_message", relation); + + return graph->CreateNode(parent->ast(), parent, data) + .ConsumeValueOrDie(); + }, + }, + { + "logs_with_observed_time_col", + table_store::schema::Relation{ + {types::TIME64NS, types::TIME64NS, types::STRING, types::STRING}, + {"start_time", "observed_time", "attribute_str", "log_message"}, + 
{types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_NONE}}, + R"pb( + endpoint_config {} + resource {} + logs { + attributes { + name: "service.name" + column { + column_type: STRING + column_index: 2 + } + } + time_column_index: 0 + observed_time_column_index: 1 + severity_number: 4 + severity_text: "INFO" + body_column_index: 3 + } + )pb", + [](IR* graph, OperatorIR* parent, table_store::schema::Relation* relation) { + OTelData data; + + auto& log = data.logs.emplace_back(); + log.time_column = CreateTypedColumn(graph, "start_time", relation); + log.observed_time_column = CreateTypedColumn(graph, "observed_time", relation); + log.attributes.push_back( + {"service.name", CreateTypedColumn(graph, "attribute_str", relation), ""}); + log.severity_number = 4; + log.severity_text = "INFO"; + log.body_column = CreateTypedColumn(graph, "log_message", relation); + + return graph->CreateNode(parent->ast(), parent, data) + .ConsumeValueOrDie(); + }, + }, { "string_value_attributes", table_store::schema::Relation{{types::TIME64NS, types::INT64}, @@ -557,6 +636,33 @@ OTelExportSinkIR* CreateSpanWithNameString(IR* graph, OperatorIR* parent, return graph->CreateNode(parent->ast(), parent, data).ConsumeValueOrDie(); } +OTelExportSinkIR* CreateLog(IR* graph, OperatorIR* parent, + table_store::schema::Relation* relation) { + OTelData data; + + auto& log = data.logs.emplace_back(); + log.time_column = CreateTypedColumn(graph, "start_time", relation); + log.body_column = CreateTypedColumn(graph, "log_message", relation); + log.severity_number = 4; + log.severity_text = "INFO"; + + return graph->CreateNode(parent->ast(), parent, data).ConsumeValueOrDie(); +} + +OTelExportSinkIR* CreateLogWithObservedTime(IR* graph, OperatorIR* parent, + table_store::schema::Relation* relation) { + OTelData data; + + auto& log = data.logs.emplace_back(); + log.time_column = CreateTypedColumn(graph, "start_time", relation); + log.observed_time_column = CreateTypedColumn(graph, "observed_time", 
relation); + log.body_column = CreateTypedColumn(graph, "log_message", relation); + log.severity_number = 4; + log.severity_text = "INFO"; + + return graph->CreateNode(parent->ast(), parent, data).ConsumeValueOrDie(); +} + INSTANTIATE_TEST_SUITE_P( ErrorTests, WrongColumnTypesTest, ::testing::ValuesIn(std::vector{ @@ -723,6 +829,33 @@ INSTANTIATE_TEST_SUITE_P( .ConsumeValueOrDie(); }, }, + { + "log_time_column_wrong", + table_store::schema::Relation{ + {types::INT64, types::STRING, types::STRING}, + {"start_time", "attribute_str", "log_message"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE}}, + "Expected time column 'start_time' to be TIME64NS, received INT64", + &CreateLog, + }, + { + "log_body_column_wrong", + table_store::schema::Relation{ + {types::TIME64NS, types::STRING, types::TIME64NS}, + {"start_time", "attribute_str", "log_message"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE}}, + "Expected body column 'log_message' to be STRING, received TIME64NS", + &CreateLog, + }, + { + "log_observed_time_column_wrong", + table_store::schema::Relation{ + {types::TIME64NS, types::INT64, types::STRING, types::STRING}, + {"start_time", "observed_time", "attribute_str", "log_message"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_NONE}}, + "Expected observed_time column 'observed_time' to be TIME64NS, received INT64", + &CreateLogWithObservedTime, + }, }), [](const ::testing::TestParamInfo& info) { return info.param.name; }); } // namespace planner diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index 4c3e8659c88..2acd981cbc9 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -946,7 +946,13 @@ px.export(df, px.otel.Data( px.otel.metric.Gauge( name='resp_latency', value=df.resp_latency_ns, - ) + ), + px.otel.log.Log( + time=df.time_, + severity_number=px.otel.log.SEVERITY_NUMBER_INFO, + severity_text="info", + body=df.service, + ), ] )) 
)pxl"; @@ -1039,6 +1045,146 @@ px.export(otel_df, px.otel.Data( )))otel"); } +constexpr char kFileSourceQuery[] = R"pxl( +import pxlog +import px + +glob_pattern= '/var/log/kern.log' +table_name='kern.log' +ttl='10m' +pxlog.FileSource(glob_pattern, table_name, ttl) + +df = px.DataFrame(table=table_name) +px.export(df, px.otel.Data( + endpoint=px.otel.Endpoint(url="px.dev:55555"), + resource={ + 'service.name' : df.service, + }, + data=[ + px.otel.metric.Gauge( + name='resp_latency', + value=df.resp_latency_ns, + ) + ] +)) +)pxl"; + +TEST_F(LogicalPlannerTest, FileSourceMutation) { + auto planner = LogicalPlanner::Create(info_).ConsumeValueOrDie(); + auto state = testutils::CreateTwoPEMsOneKelvinPlannerState(testutils::kFileSourceSchema); + plannerpb::CompileMutationsRequest req; + req.set_query_str(kFileSourceQuery); + *req.mutable_logical_planner_state() = state; + auto log_ir_or_s = planner->CompileTrace(req); + ASSERT_OK(log_ir_or_s); + auto log_ir = log_ir_or_s.ConsumeValueOrDie(); + plannerpb::CompileMutationsResponse resp; + ASSERT_OK(log_ir->ToProto(&resp)); + ASSERT_EQ(resp.mutations_size(), 1); + /* EXPECT_THAT(resp.mutations()[0].trace(), EqualsProto(kBPFTwoTraceProgramsPb)); */ +} + +TEST_F(LogicalPlannerTest, FileSourcePlan) { + auto planner = LogicalPlanner::Create(info_).ConsumeValueOrDie(); + auto state = testutils::CreateTwoPEMsOneKelvinPlannerState(testutils::kFileSourceSchema); + // Correspond to the two pems in the planner state + std::vector agent_ids = {1, 2}; + auto plan_or_s = planner->Plan(MakeQueryRequest(state, kFileSourceQuery)); + EXPECT_OK(plan_or_s); + auto plan = plan_or_s.ConsumeValueOrDie(); + EXPECT_OK(plan->ToProto()); + + auto otel_export_matched = false; + auto grpc_sink_matched = false; + for (const auto& id : plan->dag().TopologicalSort()) { + auto subgraph = plan->Get(id)->plan(); + auto otel_export = subgraph->FindNodesOfType(IRNodeType::kOTelExportSink); + auto grpc_sink = subgraph->FindNodesOfType(IRNodeType::kGRPCSink); + 
if (otel_export.empty() && grpc_sink.empty()) { + continue; + } + if (!otel_export.empty()) { + EXPECT_EQ(1, otel_export.size()); + planpb::Operator op; + auto otel_export_ir = static_cast(otel_export[0]); + EXPECT_OK(otel_export_ir->ToProto(&op)); + EXPECT_EQ(1, op.context().size()); + otel_export_matched = true; + } + if (!grpc_sink.empty()) { + EXPECT_EQ(1, grpc_sink.size()); + for (auto agent_id : agent_ids) { + planpb::Operator op; + auto grpc_sink_ir = static_cast(grpc_sink[0]); + EXPECT_OK(grpc_sink_ir->ToProto(&op, agent_id)); + EXPECT_EQ(1, op.context().size()); + } + grpc_sink_matched = true; + } + } + EXPECT_TRUE(otel_export_matched); + EXPECT_TRUE(grpc_sink_matched); +} + +const char kExplicitStreamId[] = R"pxl( +import px + +df = px.DataFrame(table='http_events', start_time='-6m', mutation_id='mutation') +df.service = df.ctx['service'] +px.export(df, px.otel.Data( + endpoint=px.otel.Endpoint(url="px.dev:55555"), + resource={ + 'service.name' : df.service, + }, + data=[ + px.otel.metric.Gauge( + name='resp_latency', + value=df.resp_latency_ns, + ) + ] +)) +)pxl"; +TEST_F(LogicalPlannerTest, non_mutation_dataframe_with_explicit_stream_id) { + auto state = testutils::CreateTwoPEMsOneKelvinPlannerState(testutils::kHttpEventsSchema); + auto planner = LogicalPlanner::Create(info_).ConsumeValueOrDie(); + // Correspond to the two pems in the planner state + std::vector agent_ids = {1, 2}; + + ASSERT_OK_AND_ASSIGN(auto plan, planner->Plan(MakeQueryRequest(state, kExplicitStreamId))); + ASSERT_OK(plan->ToProto()); + + auto otel_export_matched = false; + auto grpc_sink_matched = false; + for (const auto& id : plan->dag().TopologicalSort()) { + auto subgraph = plan->Get(id)->plan(); + auto otel_export = subgraph->FindNodesOfType(IRNodeType::kOTelExportSink); + auto grpc_sink = subgraph->FindNodesOfType(IRNodeType::kGRPCSink); + if (otel_export.empty() && grpc_sink.empty()) { + continue; + } + if (!otel_export.empty()) { + EXPECT_EQ(1, otel_export.size()); + 
planpb::Operator op; + auto otel_export_ir = static_cast(otel_export[0]); + EXPECT_OK(otel_export_ir->ToProto(&op)); + EXPECT_EQ(1, op.context().size()); + otel_export_matched = true; + } + if (!grpc_sink.empty()) { + EXPECT_EQ(1, grpc_sink.size()); + for (auto agent_id : agent_ids) { + planpb::Operator op; + auto grpc_sink_ir = static_cast(grpc_sink[0]); + EXPECT_OK(grpc_sink_ir->ToProto(&op, agent_id)); + EXPECT_EQ(1, op.context().size()); + } + grpc_sink_matched = true; + } + } + EXPECT_TRUE(otel_export_matched); + EXPECT_TRUE(grpc_sink_matched); +} + } // namespace planner } // namespace carnot } // namespace px diff --git a/src/carnot/planner/objects/BUILD.bazel b/src/carnot/planner/objects/BUILD.bazel index 060dc6a7888..09dfd062c26 100644 --- a/src/carnot/planner/objects/BUILD.bazel +++ b/src/carnot/planner/objects/BUILD.bazel @@ -37,6 +37,7 @@ pl_cc_library( "//src/carnot/planner/parser:cc_library", "//src/shared/types/typespb/wrapper:cc_library", "@com_github_opentelemetry_proto//:trace_proto_cc", + "@com_github_opentelemetry_proto//:logs_proto_cc", "@com_github_vinzenz_libpypa//:libpypa", ], ) diff --git a/src/carnot/planner/objects/dataframe.cc b/src/carnot/planner/objects/dataframe.cc index 13140b40e17..4548d676a1b 100644 --- a/src/carnot/planner/objects/dataframe.cc +++ b/src/carnot/planner/objects/dataframe.cc @@ -41,15 +41,19 @@ StatusOr> GetAsDataFrame(QLObjectPtr obj) { } StatusOr> Dataframe::Create(CompilerState* compiler_state, - OperatorIR* op, ASTVisitor* visitor) { - std::shared_ptr df(new Dataframe(compiler_state, op, op->graph(), visitor)); + OperatorIR* op, ASTVisitor* visitor, + std::optional mutation_id) { + std::shared_ptr df( + new Dataframe(compiler_state, op, op->graph(), visitor, mutation_id)); PX_RETURN_IF_ERROR(df->Init()); return df; } StatusOr> Dataframe::Create(CompilerState* compiler_state, IR* graph, - ASTVisitor* visitor) { - std::shared_ptr df(new Dataframe(compiler_state, nullptr, graph, visitor)); + ASTVisitor* visitor, + 
std::optional mutation_id) { + std::shared_ptr df( + new Dataframe(compiler_state, nullptr, graph, visitor, mutation_id)); PX_RETURN_IF_ERROR(df->Init()); return df; } @@ -124,7 +128,23 @@ StatusOr DataFrameConstructor(CompilerState* compiler_state, IR* gr ParseAllTimeFormats(compiler_state->time_now().val, end_time)); mem_source_op->SetTimeStopNS(end_time_ns); } - return Dataframe::Create(compiler_state, mem_source_op, visitor); + StringIR* mutation_id_ir = nullptr; + if (!NoneObject::IsNoneObject(args.GetArg("mutation_id"))) { + PX_ASSIGN_OR_RETURN(mutation_id_ir, GetArgAs(ast, args, "mutation_id")); + } + auto relation_map = compiler_state->relation_map(); + std::optional mutation_id = std::nullopt; + if (mutation_id_ir != nullptr) { + mutation_id = mutation_id_ir->str(); + } + for (const auto& [table_name, relation] : *relation_map) { + if (table_name == table->str() && mutation_id == std::nullopt) { + mutation_id = relation.mutation_id(); + break; + } + } + graph->RecordMutationId(mutation_id); + return Dataframe::Create(compiler_state, mem_source_op, visitor, mutation_id); } StatusOr> ProcessCols(IR* graph, const pypa::AstPtr& ast, QLObjectPtr obj, @@ -174,7 +194,7 @@ StatusOr JoinHandler(CompilerState* compiler_state, IR* graph, Oper PX_ASSIGN_OR_RETURN(JoinIR * join_op, graph->CreateNode(ast, std::vector{op, right}, how_type, left_on_cols, right_on_cols, suffix_strs)); - return Dataframe::Create(compiler_state, join_op, visitor); + return Dataframe::Create(compiler_state, join_op, visitor, graph->mutation_id()); } StatusOr ParseNameTuple(IR* ir, const pypa::AstPtr& ast, @@ -235,7 +255,7 @@ StatusOr AggHandler(CompilerState* compiler_state, IR* graph, Opera PX_ASSIGN_OR_RETURN( BlockingAggIR * agg_op, graph->CreateNode(ast, op, std::vector{}, aggregate_expressions)); - return Dataframe::Create(compiler_state, agg_op, visitor); + return Dataframe::Create(compiler_state, agg_op, visitor, graph->mutation_id()); } StatusOr MapAssignHandler(const pypa::AstPtr& 
ast, const ParsedArgs&, ASTVisitor*) { @@ -252,7 +272,7 @@ StatusOr DropHandler(CompilerState* compiler_state, IR* graph, Oper PX_ASSIGN_OR_RETURN(std::vector columns, ParseAsListOfStrings(args.GetArg("columns"), "columns")); PX_ASSIGN_OR_RETURN(DropIR * drop_op, graph->CreateNode(ast, op, columns)); - return Dataframe::Create(compiler_state, drop_op, visitor); + return Dataframe::Create(compiler_state, drop_op, visitor, graph->mutation_id()); } // Handles the head() DataFrame logic. @@ -267,7 +287,7 @@ StatusOr LimitHandler(CompilerState* compiler_state, IR* graph, Ope PX_ASSIGN_OR_RETURN(LimitIR * limit_op, graph->CreateNode(ast, op, limit_value, pem_only_val)); - return Dataframe::Create(compiler_state, limit_op, visitor); + return Dataframe::Create(compiler_state, limit_op, visitor, graph->mutation_id()); } class SubscriptHandler { @@ -315,7 +335,7 @@ StatusOr SubscriptHandler::EvalFilter(CompilerState* compiler_state OperatorIR* op, const pypa::AstPtr& ast, ExpressionIR* expr, ASTVisitor* visitor) { PX_ASSIGN_OR_RETURN(FilterIR * filter_op, graph->CreateNode(ast, op, expr)); - return Dataframe::Create(compiler_state, filter_op, visitor); + return Dataframe::Create(compiler_state, filter_op, visitor, graph->mutation_id()); } StatusOr SubscriptHandler::EvalColumn(IR* graph, OperatorIR*, const pypa::AstPtr&, @@ -349,7 +369,7 @@ StatusOr SubscriptHandler::EvalKeep(CompilerState* compiler_state, PX_ASSIGN_OR_RETURN(MapIR * map_op, graph->CreateNode(ast, op, keep_exprs, /* keep_input_columns */ false)); - return Dataframe::Create(compiler_state, map_op, visitor); + return Dataframe::Create(compiler_state, map_op, visitor, graph->mutation_id()); } // Handles the groupby() method. 
@@ -367,7 +387,7 @@ StatusOr GroupByHandler(CompilerState* compiler_state, IR* graph, O } PX_ASSIGN_OR_RETURN(GroupByIR * group_by_op, graph->CreateNode(ast, op, groups)); - return Dataframe::Create(compiler_state, group_by_op, visitor); + return Dataframe::Create(compiler_state, group_by_op, visitor, graph->mutation_id()); } // Handles the append() dataframe method and creates the union node. @@ -380,7 +400,7 @@ StatusOr UnionHandler(CompilerState* compiler_state, IR* graph, Ope parents.push_back(casted); } PX_ASSIGN_OR_RETURN(UnionIR * union_op, graph->CreateNode(ast, parents)); - return Dataframe::Create(compiler_state, union_op, visitor); + return Dataframe::Create(compiler_state, union_op, visitor, graph->mutation_id()); } // Handles the rolling() dataframe method. @@ -405,7 +425,7 @@ StatusOr RollingHandler(CompilerState* compiler_state, IR* graph, O PX_ASSIGN_OR_RETURN(RollingIR * rolling_op, graph->CreateNode(ast, op, window_col, window_size)); - return Dataframe::Create(compiler_state, rolling_op, visitor); + return Dataframe::Create(compiler_state, rolling_op, visitor, graph->mutation_id()); } /** @@ -416,15 +436,15 @@ StatusOr StreamHandler(CompilerState* compiler_state, IR* graph, Op const pypa::AstPtr& ast, const ParsedArgs&, ASTVisitor* visitor) { PX_ASSIGN_OR_RETURN(StreamIR * stream_op, graph->CreateNode(ast, op)); - return Dataframe::Create(compiler_state, stream_op, visitor); + return Dataframe::Create(compiler_state, stream_op, visitor, graph->mutation_id()); } Status Dataframe::Init() { PX_ASSIGN_OR_RETURN( std::shared_ptr constructor_fn, FuncObject::Create( - name(), {"table", "select", "start_time", "end_time"}, - {{"select", "[]"}, {"start_time", "None"}, {"end_time", "None"}}, + name(), {"table", "select", "start_time", "end_time", "mutation_id"}, + {{"select", "[]"}, {"start_time", "None"}, {"end_time", "None"}, {"mutation_id", "None"}}, /* has_variable_len_args */ false, /* has_variable_len_kwargs */ false, std::bind(&DataFrameConstructor, 
compiler_state_, graph(), std::placeholders::_1, @@ -628,7 +648,7 @@ StatusOr> Dataframe::FromColumnAssignment(CompilerSta ColExpressionVector map_exprs{{col_name, expr}}; PX_ASSIGN_OR_RETURN(MapIR * ir_node, graph_->CreateNode(expr_node, op(), map_exprs, /*keep_input_cols*/ true)); - return Dataframe::Create(compiler_state, ir_node, ast_visitor()); + return Dataframe::Create(compiler_state, ir_node, ast_visitor(), graph_->mutation_id()); } } // namespace compiler diff --git a/src/carnot/planner/objects/dataframe.h b/src/carnot/planner/objects/dataframe.h index 73f7514ba15..e239e382131 100644 --- a/src/carnot/planner/objects/dataframe.h +++ b/src/carnot/planner/objects/dataframe.h @@ -43,10 +43,12 @@ class Dataframe : public QLObject { /* name */ "DataFrame", /* type */ QLObjectType::kDataframe, }; - static StatusOr> Create(CompilerState* compiler_state, OperatorIR* op, - ASTVisitor* visitor); - static StatusOr> Create(CompilerState* compiler_state, IR* graph, - ASTVisitor* visitor); + static StatusOr> Create( + CompilerState* compiler_state, OperatorIR* op, ASTVisitor* visitor, + std::optional mutation_id = std::nullopt); + static StatusOr> Create( + CompilerState* compiler_state, IR* graph, ASTVisitor* visitor, + std::optional mutation_id = std::nullopt); static bool IsDataframe(const QLObjectPtr& object) { return object->type() == DataframeType.type(); } @@ -430,7 +432,17 @@ class Dataframe : public QLObject { : QLObject(DataframeType, op ? op->ast() : nullptr, visitor), compiler_state_(compiler_state), op_(op), - graph_(graph) {} + graph_(graph), + mutation_id_(std::nullopt) {} + + explicit Dataframe(CompilerState* compiler_state, OperatorIR* op, IR* graph, ASTVisitor* visitor, + std::optional mutation_id) + : QLObject(DataframeType, op ? 
op->ast() : nullptr, visitor), + compiler_state_(compiler_state), + op_(op), + graph_(graph), + mutation_id_(mutation_id) {} + StatusOr> GetAttributeImpl(const pypa::AstPtr& ast, std::string_view name) const override; @@ -441,6 +453,7 @@ class Dataframe : public QLObject { CompilerState* compiler_state_; OperatorIR* op_ = nullptr; IR* graph_ = nullptr; + std::optional mutation_id_; }; StatusOr> GetAsDataFrame(QLObjectPtr obj); diff --git a/src/carnot/planner/objects/otel.cc b/src/carnot/planner/objects/otel.cc index 7f79d6196bb..78d56b6cc6a 100644 --- a/src/carnot/planner/objects/otel.cc +++ b/src/carnot/planner/objects/otel.cc @@ -18,6 +18,7 @@ #include "src/carnot/planner/objects/otel.h" #include +#include #include #include @@ -39,6 +40,8 @@ namespace carnot { namespace planner { namespace compiler { +using OTelLogRecord = px::carnot::planner::OTelLog; + StatusOr> OTelModule::Create(CompilerState* compiler_state, ASTVisitor* ast_visitor, IR* ir) { auto otel_module = std::shared_ptr(new OTelModule(ast_visitor)); @@ -58,6 +61,12 @@ StatusOr> OTelTrace::Create(ASTVisitor* ast_visitor, return otel_trace; } +StatusOr> OTelLog::Create(ASTVisitor* ast_visitor, IR* graph) { + auto otel_trace = std::shared_ptr(new OTelLog(ast_visitor, graph)); + PX_RETURN_IF_ERROR(otel_trace->Init()); + return otel_trace; +} + StatusOr> EndpointConfig::Create( ASTVisitor* ast_visitor, std::string url, std::vector attributes, bool insecure, int64_t timeout) { @@ -96,7 +105,7 @@ Status ParseEndpointConfig(CompilerState* compiler_state, const QLObjectPtr& end } StatusOr> OTelDataContainer::Create( - ASTVisitor* ast_visitor, std::variant data) { + ASTVisitor* ast_visitor, std::variant data) { return std::shared_ptr(new OTelDataContainer(ast_visitor, std::move(data))); } @@ -249,6 +258,7 @@ StatusOr OTelDataDefinition(CompilerState* compiler_state, const py std::visit(overloaded{ [&otel_data](const OTelMetric& metric) { otel_data.metrics.push_back(metric); }, [&otel_data](const OTelSpan& span) 
{ otel_data.spans.push_back(span); }, + [&otel_data](const OTelLogRecord& log) { otel_data.logs.push_back(log); }, }, container->data()); } @@ -326,6 +336,9 @@ Status OTelModule::Init(CompilerState* compiler_state, IR* ir) { PX_ASSIGN_OR_RETURN(auto trace, OTelTrace::Create(ast_visitor(), ir)); PX_RETURN_IF_ERROR(AssignAttribute("trace", trace)); + PX_ASSIGN_OR_RETURN(auto log, OTelLog::Create(ast_visitor(), ir)); + PX_RETURN_IF_ERROR(AssignAttribute("log", log)); + PX_ASSIGN_OR_RETURN( std::shared_ptr endpoint_fn, FuncObject::Create(kEndpointOpID, {"url", "headers", "insecure", "timeout"}, @@ -466,6 +479,71 @@ Status OTelTrace::Init() { return Status::OK(); } +Status OTelLog::AddSeverityNumberAttributes() { + auto ast = std::make_shared(pypa::AstType::Number); + const google::protobuf::EnumDescriptor* severity_num_desc = ::opentelemetry::proto::logs::v1::SeverityNumber_descriptor(); + if (!severity_num_desc) { + // TODO(ddelnano): return an error + } + for (int i = 0; i < severity_num_desc->value_count(); ++i) { + const google::protobuf::EnumValueDescriptor* value_desc = severity_num_desc->value(i); + PX_ASSIGN_OR_RETURN(IntIR * severity_number, + graph_->CreateNode(ast, static_cast(value_desc->number()))); + PX_ASSIGN_OR_RETURN(auto value, ExprObject::Create(severity_number, ast_visitor())); + PX_RETURN_IF_ERROR(AssignAttribute(value_desc->name(), value)); + } + PX_UNUSED(graph_); + return Status::OK(); +} + +StatusOr LogDefinition(const pypa::AstPtr& ast, const ParsedArgs& args, + ASTVisitor* visitor) { + OTelLogRecord log; + + PX_ASSIGN_OR_RETURN(log.time_column, GetArgAs(ast, args, "time")); + if (!NoneObject::IsNoneObject(args.GetArg("observed_time"))) { + PX_ASSIGN_OR_RETURN(log.observed_time_column, GetArgAs(ast, args, "observed_time")); + } + + PX_ASSIGN_OR_RETURN(log.body_column, GetArgAs(ast, args, "body")); + PX_ASSIGN_OR_RETURN(auto severity_number, GetArgAs(ast, args, "severity_number")); + log.severity_number = severity_number->val(); + + 
PX_ASSIGN_OR_RETURN(auto severity_text, GetArgAsString(ast, args, "severity_text")); + log.severity_text = severity_text; + + QLObjectPtr attributes = args.GetArg("attributes"); + if (!DictObject::IsDict(attributes)) { + return attributes->CreateError("Expected attributes to be a dictionary, received $0", + attributes->name()); + } + + PX_ASSIGN_OR_RETURN(log.attributes, ParseAttributes(static_cast(attributes.get()))); + + return OTelDataContainer::Create(visitor, std::move(log)); +} + +Status OTelLog::Init() { + // Setup methods. + PX_ASSIGN_OR_RETURN(std::shared_ptr span_fn, + FuncObject::Create(kLogOpID, + {"time", "observed_time", "body", "attributes", "severity_number", "severity_text"}, + {{"observed_time", "None"}, + {"severity_number", "px.otel.log.SEVERITY_NUMBER_INFO"}, + {"severity_text", "info"}, + {"attributes", "{}"}}, + /* has_variable_len_args */ false, + /* has_variable_len_kwargs */ false, + std::bind(&LogDefinition, std::placeholders::_1, + std::placeholders::_2, std::placeholders::_3), + ast_visitor())); + PX_RETURN_IF_ERROR(span_fn->SetDocString(kLogOpDocstring)); + AddMethod(kLogOpID, span_fn); + + PX_RETURN_IF_ERROR(AddSeverityNumberAttributes()); + return Status::OK(); +} + Status EndpointConfig::ToProto(planpb::OTelEndpointConfig* pb) { pb->set_url(url_); for (const auto& attr : attributes_) { diff --git a/src/carnot/planner/objects/otel.h b/src/carnot/planner/objects/otel.h index 5f4c1d19eb7..9ec639ffe37 100644 --- a/src/carnot/planner/objects/otel.h +++ b/src/carnot/planner/objects/otel.h @@ -24,6 +24,7 @@ #include #include "opentelemetry/proto/trace/v1/trace.pb.h" +#include "opentelemetry/proto/logs/v1/logs.pb.h" #include "src/carnot/planner/compiler_state/compiler_state.h" #include "src/carnot/planner/objects/funcobject.h" #include "src/carnot/planpb/plan.pb.h" @@ -212,6 +213,48 @@ class OTelTrace : public QLObject { IR* graph_; }; +class OTelLog : public QLObject { + public: + inline static constexpr char kOTelLogModule[] = "log"; + 
static constexpr TypeDescriptor OTelLogModuleType = { + /* name */ kOTelLogModule, + /* type */ QLObjectType::kModule, + }; + static StatusOr> Create(ASTVisitor* ast_visitor, IR* graph); + + inline static constexpr char kLogOpID[] = "Log"; + inline static constexpr char kLogOpDocstring[] = R"doc( + Defines the OpenTelemetry Log type. + + Log describes how to transform a pixie DataFrame into the OpenTelemetry + Log type. + + :topic: otel + + Args: + time (Column): The column that marks the timestamp for the log, must be TIME64NS. + observed_time (Column, optional): The column that marks the XXX of the log, must be TIME64NS. + body (Column): The column that contains the log message to emit, must be STRING. + severity_number (int, optional): The OpenTelemetry SeverityNumber enum value to assign for the log, defaults to SEVERITY_NUMBER_INFO if not set. + severity_text (string, optional): The log level associated with the log, defaults to "info" if not set. + if not set. + attributes (Dict[string, Column|string], optional): A mapping of attribute name to a string or the column + that stores data about the attribute. + Returns: + OTelDataContainer: the mapping of DataFrame columns to OpenTelemetry Log fields. Can be passed + into `px.otel.Data()` as the data argument. 
+ )doc"; + + protected: + OTelLog(ASTVisitor* ast_visitor, IR* graph) + : QLObject(OTelLogModuleType, ast_visitor), graph_(graph) {} + Status Init(); + Status AddSeverityNumberAttributes(); + + private: + IR* graph_; +}; + class EndpointConfig : public QLObject { public: struct ConnAttribute { @@ -246,6 +289,7 @@ class EndpointConfig : public QLObject { }; class OTelDataContainer : public QLObject { + using OTelLogRecord = px::carnot::planner::OTelLog; public: static constexpr TypeDescriptor OTelDataContainerType = { /* name */ "OTelDataContainer", @@ -253,20 +297,20 @@ class OTelDataContainer : public QLObject { }; static StatusOr> Create( - ASTVisitor* ast_visitor, std::variant data); + ASTVisitor* ast_visitor, std::variant data); static bool IsOTelDataContainer(const QLObjectPtr& obj) { return obj->type() == OTelDataContainerType.type(); } - const std::variant& data() const { return data_; } + const std::variant& data() const { return data_; } protected: - OTelDataContainer(ASTVisitor* ast_visitor, std::variant data) + OTelDataContainer(ASTVisitor* ast_visitor, std::variant data) : QLObject(OTelDataContainerType, ast_visitor), data_(std::move(data)) {} private: - std::variant data_; + std::variant data_; }; } // namespace compiler diff --git a/src/carnot/planner/objects/otel_test.cc b/src/carnot/planner/objects/otel_test.cc index 97e21cd663e..c1b7fdfcac3 100644 --- a/src/carnot/planner/objects/otel_test.cc +++ b/src/carnot/planner/objects/otel_test.cc @@ -46,9 +46,11 @@ class OTelExportTest : public QLObjectTest { OTelModule::Create(compiler_state.get(), ast_visitor.get(), graph.get())); ASSERT_OK_AND_ASSIGN(auto otelmetric, OTelMetrics::Create(ast_visitor.get(), graph.get())); ASSERT_OK_AND_ASSIGN(auto oteltrace, OTelTrace::Create(ast_visitor.get(), graph.get())); + ASSERT_OK_AND_ASSIGN(auto otellog, OTelLog::Create(ast_visitor.get(), graph.get())); var_table->Add("otel", otel); var_table->Add("otelmetric", otelmetric); var_table->Add("oteltrace", oteltrace); + 
var_table->Add("otellog", otellog); } StatusOr ParseOutOTelExportIR(const std::string& otel_export_expression, @@ -469,6 +471,101 @@ otel_sink_op { parent_span_id_column_index: 7 kind_value: 2 } +})pb"}, + {"log_basic", + R"pxl( +otel.Data( + endpoint=otel.Endpoint( + url='0.0.0.0:55690', + ), + resource={ + 'service.name' : df.service, + }, + data=[ + otellog.Log( + time=df.start_time, + severity_number=4, + severity_text='info', + body=df.log_message, + ), + ] +))pxl", + table_store::schema::Relation{ + {types::TIME64NS, types::STRING, types::STRING}, + {"start_time", "service", "log_message"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE}, + }, + R"pb( +op_type: OTEL_EXPORT_SINK_OPERATOR +otel_sink_op { + endpoint_config { + url: "0.0.0.0:55690" + timeout: 5 + } + resource { + attributes { + name: "service.name" + column { + column_type: STRING + column_index: 1 + } + } + } + logs { + time_column_index: 0 + observed_time_column_index: -1 + body_column_index: 2 + severity_number: 4 + severity_text: "info" + } +})pb"}, + {"log_with_observed_time", + R"pxl( +otel.Data( + endpoint=otel.Endpoint( + url='0.0.0.0:55690', + ), + resource={ + 'service.name' : df.service, + }, + data=[ + otellog.Log( + time=df.time_, + observed_time=df.end_time, + severity_number=4, + severity_text='info', + body=df.log_message, + ), + ] +))pxl", + table_store::schema::Relation{ + {types::TIME64NS, types::TIME64NS, types::STRING, types::STRING}, + {"time_", "end_time", "service", "log_message"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_NONE}, + }, + R"pb( +op_type: OTEL_EXPORT_SINK_OPERATOR +otel_sink_op { + endpoint_config { + url: "0.0.0.0:55690" + timeout: 5 + } + resource { + attributes { + name: "service.name" + column { + column_type: STRING + column_index: 2 + } + } + } + logs { + time_column_index: 0 + observed_time_column_index: 1 + body_column_index: 3 + severity_number: 4 + severity_text: "info" + } })pb"}, {"all_attribute_types", R"pxl( diff --git 
a/src/carnot/planner/objects/qlobject.h b/src/carnot/planner/objects/qlobject.h index 4231fb78b0e..62733cad776 100644 --- a/src/carnot/planner/objects/qlobject.h +++ b/src/carnot/planner/objects/qlobject.h @@ -66,6 +66,7 @@ enum class QLObjectType { kExporter, kOTelEndpoint, kOTelDataContainer, + kLogModule, }; std::string QLObjectTypeString(QLObjectType type); diff --git a/src/carnot/planner/plannerpb/BUILD.bazel b/src/carnot/planner/plannerpb/BUILD.bazel index 4b73065c498..8fb5c37e0e4 100644 --- a/src/carnot/planner/plannerpb/BUILD.bazel +++ b/src/carnot/planner/plannerpb/BUILD.bazel @@ -28,6 +28,7 @@ pl_proto_library( deps = [ "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", + "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/carnot/planpb:plan_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/types/typespb:types_pl_proto", @@ -42,6 +43,7 @@ pl_cc_proto_library( deps = [ "//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", + "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/carnot/planpb:plan_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/types/typespb/wrapper:cc_library", @@ -56,6 +58,7 @@ pl_go_proto_library( deps = [ "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/carnot/planpb:plan_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/types/typespb:types_pl_go_proto", diff --git a/src/carnot/planner/plannerpb/service.pb.go b/src/carnot/planner/plannerpb/service.pb.go index 172eeb1cd81..71eda5ae84a 100755 --- a/src/carnot/planner/plannerpb/service.pb.go +++ b/src/carnot/planner/plannerpb/service.pb.go @@ -17,6 +17,7 @@ import ( 
math_bits "math/bits" distributedpb "px.dev/pixie/src/carnot/planner/distributedpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" + ir "px.dev/pixie/src/carnot/planner/file_source/ir" statuspb "px.dev/pixie/src/common/base/statuspb" reflect "reflect" strings "strings" @@ -599,18 +600,63 @@ func (m *ConfigUpdate) GetAgentPodName() string { return "" } +type DeleteFileSource struct { + GlobPattern string `protobuf:"bytes,1,opt,name=glob_pattern,json=globPattern,proto3" json:"glob_pattern,omitempty"` +} + +func (m *DeleteFileSource) Reset() { *m = DeleteFileSource{} } +func (*DeleteFileSource) ProtoMessage() {} +func (*DeleteFileSource) Descriptor() ([]byte, []int) { + return fileDescriptor_710b3465b5cdfdeb, []int{7} +} +func (m *DeleteFileSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteFileSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteFileSource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteFileSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteFileSource.Merge(m, src) +} +func (m *DeleteFileSource) XXX_Size() int { + return m.Size() +} +func (m *DeleteFileSource) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteFileSource.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteFileSource proto.InternalMessageInfo + +func (m *DeleteFileSource) GetGlobPattern() string { + if m != nil { + return m.GlobPattern + } + return "" +} + type CompileMutation struct { // Types that are valid to be assigned to Mutation: // *CompileMutation_Trace // *CompileMutation_DeleteTracepoint // *CompileMutation_ConfigUpdate + // *CompileMutation_FileSource + // *CompileMutation_DeleteFileSource Mutation isCompileMutation_Mutation `protobuf_oneof:"mutation"` } func (m *CompileMutation) Reset() { *m = CompileMutation{} } 
func (*CompileMutation) ProtoMessage() {} func (*CompileMutation) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{7} + return fileDescriptor_710b3465b5cdfdeb, []int{8} } func (m *CompileMutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -655,10 +701,18 @@ type CompileMutation_DeleteTracepoint struct { type CompileMutation_ConfigUpdate struct { ConfigUpdate *ConfigUpdate `protobuf:"bytes,4,opt,name=config_update,json=configUpdate,proto3,oneof" json:"config_update,omitempty"` } +type CompileMutation_FileSource struct { + FileSource *ir.FileSourceDeployment `protobuf:"bytes,5,opt,name=file_source,json=fileSource,proto3,oneof" json:"file_source,omitempty"` +} +type CompileMutation_DeleteFileSource struct { + DeleteFileSource *DeleteFileSource `protobuf:"bytes,6,opt,name=delete_file_source,json=deleteFileSource,proto3,oneof" json:"delete_file_source,omitempty"` +} func (*CompileMutation_Trace) isCompileMutation_Mutation() {} func (*CompileMutation_DeleteTracepoint) isCompileMutation_Mutation() {} func (*CompileMutation_ConfigUpdate) isCompileMutation_Mutation() {} +func (*CompileMutation_FileSource) isCompileMutation_Mutation() {} +func (*CompileMutation_DeleteFileSource) isCompileMutation_Mutation() {} func (m *CompileMutation) GetMutation() isCompileMutation_Mutation { if m != nil { @@ -688,12 +742,28 @@ func (m *CompileMutation) GetConfigUpdate() *ConfigUpdate { return nil } +func (m *CompileMutation) GetFileSource() *ir.FileSourceDeployment { + if x, ok := m.GetMutation().(*CompileMutation_FileSource); ok { + return x.FileSource + } + return nil +} + +func (m *CompileMutation) GetDeleteFileSource() *DeleteFileSource { + if x, ok := m.GetMutation().(*CompileMutation_DeleteFileSource); ok { + return x.DeleteFileSource + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*CompileMutation) XXX_OneofWrappers() []interface{} { return []interface{}{ (*CompileMutation_Trace)(nil), (*CompileMutation_DeleteTracepoint)(nil), (*CompileMutation_ConfigUpdate)(nil), + (*CompileMutation_FileSource)(nil), + (*CompileMutation_DeleteFileSource)(nil), } } @@ -705,7 +775,7 @@ type CompileMutationsResponse struct { func (m *CompileMutationsResponse) Reset() { *m = CompileMutationsResponse{} } func (*CompileMutationsResponse) ProtoMessage() {} func (*CompileMutationsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{8} + return fileDescriptor_710b3465b5cdfdeb, []int{9} } func (m *CompileMutationsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -756,7 +826,7 @@ type GenerateOTelScriptRequest struct { func (m *GenerateOTelScriptRequest) Reset() { *m = GenerateOTelScriptRequest{} } func (*GenerateOTelScriptRequest) ProtoMessage() {} func (*GenerateOTelScriptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{9} + return fileDescriptor_710b3465b5cdfdeb, []int{10} } func (m *GenerateOTelScriptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -807,7 +877,7 @@ type GenerateOTelScriptResponse struct { func (m *GenerateOTelScriptResponse) Reset() { *m = GenerateOTelScriptResponse{} } func (*GenerateOTelScriptResponse) ProtoMessage() {} func (*GenerateOTelScriptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{10} + return fileDescriptor_710b3465b5cdfdeb, []int{11} } func (m *GenerateOTelScriptResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -862,6 +932,7 @@ func init() { proto.RegisterType((*CompileMutationsRequest)(nil), "px.carnot.planner.plannerpb.CompileMutationsRequest") proto.RegisterType((*DeleteTracepoint)(nil), "px.carnot.planner.plannerpb.DeleteTracepoint") proto.RegisterType((*ConfigUpdate)(nil), "px.carnot.planner.plannerpb.ConfigUpdate") + 
proto.RegisterType((*DeleteFileSource)(nil), "px.carnot.planner.plannerpb.DeleteFileSource") proto.RegisterType((*CompileMutation)(nil), "px.carnot.planner.plannerpb.CompileMutation") proto.RegisterType((*CompileMutationsResponse)(nil), "px.carnot.planner.plannerpb.CompileMutationsResponse") proto.RegisterType((*GenerateOTelScriptRequest)(nil), "px.carnot.planner.plannerpb.GenerateOTelScriptRequest") @@ -873,77 +944,82 @@ func init() { } var fileDescriptor_710b3465b5cdfdeb = []byte{ - // 1108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x51, 0x6f, 0x1b, 0xc5, - 0x13, 0xf7, 0xd9, 0x69, 0x63, 0x8f, 0x9d, 0xfe, 0xd3, 0x4d, 0xfe, 0xe0, 0xba, 0xe2, 0x12, 0x9d, - 0x0a, 0x0a, 0x01, 0xce, 0x90, 0x06, 0x82, 0x2a, 0x01, 0xc2, 0x4d, 0x20, 0x54, 0xa5, 0x84, 0x4b, - 0xda, 0x87, 0xaa, 0xe2, 0x74, 0xbe, 0x9b, 0xb8, 0x27, 0xce, 0x7b, 0xd7, 0xdd, 0xbd, 0xca, 0xe1, - 0x85, 0x16, 0x89, 0x77, 0x24, 0xbe, 0x02, 0x42, 0x20, 0x3e, 0x03, 0xef, 0x3c, 0xe6, 0xb1, 0x4f, - 0x11, 0x71, 0x24, 0xc4, 0x63, 0x3f, 0x02, 0xda, 0xdd, 0xbb, 0xc4, 0x49, 0xdc, 0xc4, 0x89, 0x78, - 0xe4, 0xe9, 0x66, 0x67, 0x67, 0x7e, 0x33, 0xfb, 0x9b, 0x99, 0xdd, 0x83, 0x79, 0xce, 0xfc, 0xa6, - 0xef, 0x31, 0x1a, 0x8b, 0x66, 0x12, 0x79, 0x94, 0x22, 0xcb, 0xbf, 0x49, 0xbb, 0xc9, 0x91, 0x3d, - 0x0e, 0x7d, 0xb4, 0x13, 0x16, 0x8b, 0x98, 0x5c, 0x4d, 0x7a, 0xb6, 0x36, 0xb5, 0x33, 0x13, 0x7b, - 0xdf, 0xb4, 0xf1, 0xc1, 0x10, 0xa0, 0x60, 0x8b, 0x7a, 0xdd, 0xd0, 0x77, 0x05, 0xf3, 0xfc, 0x90, - 0x76, 0x9a, 0x21, 0x6b, 0x46, 0x71, 0x27, 0xf4, 0xbd, 0x28, 0x69, 0xe7, 0x92, 0xc6, 0x6e, 0xbc, - 0xaa, 0xdc, 0xe3, 0x6e, 0x37, 0xa6, 0xcd, 0xb6, 0xc7, 0xb1, 0xc9, 0x85, 0x27, 0x52, 0x2e, 0x73, - 0x50, 0x42, 0x66, 0x36, 0xdd, 0x89, 0x3b, 0xb1, 0x12, 0x9b, 0x52, 0xca, 0xb4, 0x4b, 0xc3, 0x62, - 0x87, 0x5c, 0xb0, 0xb0, 0x9d, 0x0a, 0x0c, 0x92, 0xf6, 0xe0, 0xca, 0x95, 0x16, 0xda, 0xd1, 0xfa, - 0xcb, 0x80, 0x89, 0x4f, 0x52, 0xea, 0x6f, 0xc4, 0x2b, 0x3d, 0xf4, 0x53, 0x81, 0xe4, 
0x2a, 0x54, - 0x36, 0x53, 0xea, 0xbb, 0xd4, 0xeb, 0x62, 0xdd, 0x98, 0x35, 0xe6, 0x2a, 0x4e, 0x59, 0x2a, 0xee, - 0x78, 0x5d, 0x24, 0x0e, 0x80, 0xc7, 0x3a, 0xee, 0x63, 0x2f, 0x4a, 0x91, 0xd7, 0x8b, 0xb3, 0xa5, - 0xb9, 0xea, 0xc2, 0x75, 0xfb, 0x04, 0x56, 0xec, 0x43, 0xe0, 0xf6, 0xc7, 0xac, 0x73, 0x4f, 0xfa, - 0x3a, 0x15, 0x2f, 0x93, 0x38, 0xb1, 0x61, 0x2a, 0x4e, 0x45, 0x92, 0x0a, 0x57, 0x78, 0xed, 0x08, - 0xdd, 0x84, 0xe1, 0x66, 0xd8, 0xab, 0x97, 0x54, 0xe8, 0xcb, 0x7a, 0x6b, 0x43, 0xee, 0xac, 0xa9, - 0x8d, 0xc6, 0x22, 0x94, 0x73, 0x18, 0x42, 0x60, 0x6c, 0x20, 0x4f, 0x25, 0x93, 0x69, 0xb8, 0xa0, - 0xf2, 0xab, 0x17, 0x95, 0x52, 0x2f, 0xac, 0xdf, 0xc7, 0x60, 0xfc, 0x66, 0x4c, 0x37, 0xc3, 0x0e, - 0x27, 0x4f, 0x0d, 0x98, 0x8e, 0x05, 0x46, 0x2e, 0xd2, 0x20, 0x89, 0x43, 0x2a, 0x5c, 0x5f, 0xed, - 0x28, 0x98, 0xea, 0xc2, 0xd2, 0x89, 0x07, 0xca, 0x40, 0xec, 0x2f, 0x36, 0x30, 0x5a, 0xc9, 0xfc, - 0xb5, 0xae, 0xf5, 0x52, 0x7f, 0x67, 0x86, 0x1c, 0xd7, 0x3b, 0x44, 0x06, 0x3b, 0xac, 0x23, 0xf7, - 0x60, 0x22, 0x89, 0xd2, 0x4e, 0x48, 0xf3, 0xd8, 0x45, 0x15, 0xfb, 0x9d, 0x91, 0x62, 0xaf, 0x29, - 0xcf, 0x0c, 0xbd, 0x96, 0x0c, 0xac, 0x1a, 0x4f, 0x8b, 0x30, 0x24, 0x05, 0x72, 0x05, 0x4a, 0x29, - 0x8b, 0x34, 0x4f, 0xad, 0xf1, 0xfe, 0xce, 0x4c, 0xe9, 0xae, 0x73, 0xdb, 0x91, 0x3a, 0xf2, 0x15, - 0x8c, 0x3f, 0x44, 0x2f, 0x40, 0x96, 0x17, 0x74, 0xf9, 0x9c, 0xe7, 0xb7, 0x57, 0x35, 0xcc, 0x0a, - 0x15, 0x6c, 0xcb, 0xc9, 0x41, 0x49, 0x03, 0xca, 0x21, 0xe5, 0xe8, 0xa7, 0x0c, 0x55, 0x51, 0xcb, - 0xce, 0xfe, 0x9a, 0xd4, 0x61, 0x5c, 0x84, 0x5d, 0x8c, 0x53, 0x51, 0x1f, 0x9b, 0x35, 0xe6, 0x4a, - 0x4e, 0xbe, 0x6c, 0xdc, 0x80, 0xda, 0x20, 0x1c, 0x99, 0x84, 0xd2, 0xd7, 0xb8, 0x95, 0x15, 0x5a, - 0x8a, 0xc3, 0xeb, 0x7c, 0xa3, 0xf8, 0xbe, 0xd1, 0x70, 0xa0, 0x36, 0xc8, 0x10, 0xb1, 0x60, 0x82, - 0x0b, 0x8f, 0x09, 0x57, 0x82, 0xbb, 0x94, 0x2b, 0x94, 0x92, 0x53, 0x55, 0xca, 0x8d, 0xb0, 0x8b, - 0x77, 0x38, 0x31, 0xa1, 0x8a, 0x34, 0xd8, 0xb7, 0x28, 0x2a, 0x8b, 0x0a, 0xd2, 0x40, 0xef, 0x5b, - 0x3f, 0x17, 0xa1, 0xf6, 
0x65, 0x8a, 0x6c, 0xcb, 0xc1, 0x47, 0x29, 0x72, 0x41, 0x1e, 0xc2, 0xff, - 0xb3, 0x01, 0x76, 0x33, 0x72, 0x5c, 0x39, 0xa8, 0x58, 0xbf, 0xa0, 0x0a, 0xb9, 0x38, 0x84, 0xc4, - 0x43, 0x13, 0x69, 0xdf, 0xd6, 0xde, 0x6b, 0x7a, 0x73, 0x5d, 0xfa, 0x3a, 0x53, 0xd1, 0x71, 0xa5, - 0x9c, 0xc8, 0x47, 0x32, 0xb2, 0xcb, 0x05, 0xcb, 0x27, 0x52, 0x29, 0xd6, 0x05, 0x23, 0x9f, 0x01, - 0x60, 0x0f, 0x7d, 0x57, 0x8e, 0x28, 0xaf, 0x97, 0x54, 0x01, 0xe7, 0x47, 0x9f, 0x48, 0xa7, 0x22, - 0xbd, 0xa5, 0x8a, 0x93, 0x0f, 0x61, 0x5c, 0xf7, 0x22, 0x57, 0xc5, 0xa8, 0x2e, 0x5c, 0x1b, 0xa5, - 0x11, 0x9c, 0xdc, 0xe9, 0xd6, 0x58, 0xb9, 0x38, 0x59, 0xb2, 0xbe, 0x33, 0x60, 0x22, 0x23, 0x8a, - 0x27, 0x31, 0xe5, 0x48, 0xde, 0x80, 0x8b, 0xfa, 0x0a, 0xcb, 0xe6, 0x6b, 0x4a, 0xc2, 0xe6, 0xb7, - 0x9b, 0xbd, 0xae, 0x04, 0x27, 0x33, 0x21, 0xcb, 0x30, 0x26, 0x43, 0x64, 0xe3, 0xf0, 0xf6, 0xa9, - 0x2c, 0x2e, 0x1f, 0xac, 0x24, 0x69, 0x8e, 0xf2, 0xb6, 0x7e, 0x2b, 0xc2, 0xcb, 0x37, 0xe3, 0x6e, - 0x12, 0x46, 0xf8, 0x79, 0x2a, 0x3c, 0x11, 0xc6, 0x94, 0xff, 0x57, 0xb8, 0x17, 0x14, 0xce, 0x7a, - 0x0d, 0x26, 0x97, 0x31, 0x42, 0x81, 0x1b, 0xcc, 0xf3, 0x51, 0x4d, 0xf4, 0xb0, 0x9b, 0xd5, 0x7a, - 0x00, 0x35, 0xed, 0x7b, 0x37, 0x09, 0xe4, 0xf9, 0x46, 0x9c, 0x49, 0x72, 0x0d, 0x2e, 0x79, 0x1d, - 0xa4, 0xc2, 0x4d, 0xe2, 0x40, 0xbf, 0x2b, 0xfa, 0x72, 0xaf, 0x29, 0xed, 0x5a, 0x1c, 0xc8, 0xb7, - 0xc5, 0xfa, 0xb5, 0x08, 0xff, 0x3b, 0x52, 0x33, 0x72, 0x1f, 0x2e, 0xc8, 0xa7, 0x13, 0xb3, 0x76, - 0x68, 0x0d, 0xab, 0xcd, 0xe1, 0x27, 0xd6, 0x0e, 0x99, 0x9d, 0x3f, 0xac, 0x07, 0xc7, 0x59, 0xc6, - 0x24, 0x8a, 0xb7, 0xba, 0x48, 0xc5, 0x6a, 0xc1, 0xd1, 0x90, 0xe4, 0x01, 0x5c, 0x0e, 0xd4, 0xa9, - 0x95, 0xab, 0xb6, 0x53, 0x89, 0x55, 0x17, 0xde, 0x3a, 0x91, 0xbf, 0xa3, 0x5c, 0xad, 0x16, 0x9c, - 0xc9, 0xe0, 0x28, 0x7f, 0x6b, 0x30, 0xa1, 0xe9, 0x75, 0x53, 0x45, 0x56, 0x56, 0x99, 0xd7, 0x47, - 0xa8, 0x8c, 0x66, 0x77, 0xb5, 0xe0, 0xd4, 0xfc, 0x81, 0x75, 0x0b, 0xa0, 0xdc, 0xcd, 0x78, 0xb1, - 0x7e, 0x34, 0xa0, 0x7e, 0xbc, 0xbf, 0xcf, 0x33, 0x6f, 0xb7, 
0xa0, 0x92, 0xa3, 0xe6, 0xf7, 0xff, - 0x9b, 0xa7, 0xe4, 0x78, 0x28, 0xac, 0x73, 0xe0, 0x6e, 0xfd, 0x64, 0xc0, 0x95, 0x4f, 0x91, 0x22, - 0xf3, 0x04, 0xca, 0xe7, 0x61, 0xdd, 0x67, 0x61, 0x22, 0x4e, 0x9d, 0x3b, 0xe3, 0xdf, 0x9e, 0xbb, - 0x57, 0x00, 0x92, 0x5e, 0xe4, 0x72, 0x15, 0x3e, 0x6b, 0xc5, 0x4a, 0xd2, 0xcb, 0xf2, 0xb1, 0xbe, - 0x81, 0xc6, 0xb0, 0x2c, 0xcf, 0xc3, 0x5e, 0x13, 0xaa, 0xea, 0x47, 0x62, 0x30, 0x54, 0xeb, 0x52, - 0x7f, 0x67, 0x06, 0x06, 0x90, 0x41, 0x9a, 0x68, 0x79, 0xe1, 0x49, 0x09, 0x2e, 0xe5, 0xb9, 0xea, - 0x5f, 0x4b, 0x82, 0x72, 0xaa, 0x14, 0xa7, 0xea, 0xda, 0x24, 0x27, 0xb7, 0xc8, 0xe0, 0x1b, 0xd4, - 0x98, 0x1f, 0xc5, 0x34, 0x3b, 0xd7, 0xb7, 0x30, 0x79, 0xb4, 0x63, 0xc8, 0xe2, 0x59, 0x2a, 0x9d, - 0x5f, 0xa0, 0x8d, 0x77, 0xcf, 0xe8, 0x95, 0x25, 0xf0, 0xbd, 0x01, 0xe4, 0x38, 0xef, 0xe4, 0xbd, - 0x13, 0xd1, 0x5e, 0xd8, 0x4e, 0x8d, 0xa5, 0x33, 0xfb, 0xe9, 0x3c, 0x5a, 0x1f, 0x6d, 0xef, 0x9a, - 0x85, 0x67, 0xbb, 0x66, 0xe1, 0xf9, 0xae, 0x69, 0x3c, 0xe9, 0x9b, 0xc6, 0x2f, 0x7d, 0xd3, 0xf8, - 0xa3, 0x6f, 0x1a, 0xdb, 0x7d, 0xd3, 0xf8, 0xb3, 0x6f, 0x1a, 0x7f, 0xf7, 0xcd, 0xc2, 0xf3, 0xbe, - 0x69, 0xfc, 0xb0, 0x67, 0x16, 0xb6, 0xf7, 0xcc, 0xc2, 0xb3, 0x3d, 0xb3, 0x70, 0xbf, 0xb2, 0x8f, - 0xdd, 0xbe, 0xa8, 0x7e, 0x9d, 0xaf, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x05, 0x48, 0x4a, - 0x3a, 0x0c, 0x00, 0x00, + // 1191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xda, 0x69, 0x63, 0x3f, 0x3b, 0x25, 0x9d, 0x16, 0x70, 0x5d, 0xb1, 0x2d, 0xab, 0x82, + 0x4a, 0x81, 0x35, 0xa4, 0xff, 0x50, 0x25, 0x40, 0xb8, 0x69, 0x09, 0x55, 0x29, 0x66, 0x93, 0x56, + 0xa2, 0x2a, 0xac, 0xd6, 0xeb, 0x17, 0x77, 0xc5, 0x7a, 0x76, 0x3b, 0x3b, 0x5b, 0x39, 0x5c, 0x68, + 0x91, 0xb8, 0x23, 0xf1, 0x15, 0x10, 0x42, 0xe2, 0x33, 0x70, 0xe7, 0x98, 0x63, 0x4f, 0x11, 0x71, + 0x24, 0xc4, 0xb1, 0x1f, 0x01, 0xcd, 0x9f, 0x8d, 0xd7, 0x89, 0x9b, 0x38, 0x11, 0x47, 0x4e, 0x99, + 0x79, 0xf3, 0xde, 0xef, 
0xbd, 0xfd, 0xfd, 0xe6, 0xcd, 0x73, 0xe0, 0x42, 0xc2, 0xfc, 0xa6, 0xef, + 0x31, 0x1a, 0xf1, 0x66, 0x1c, 0x7a, 0x94, 0x22, 0xcb, 0xfe, 0xc6, 0x9d, 0x66, 0x82, 0xec, 0x71, + 0xe0, 0xa3, 0x1d, 0xb3, 0x88, 0x47, 0xe4, 0x74, 0x3c, 0xb0, 0x95, 0xab, 0xad, 0x5d, 0xec, 0x6d, + 0xd7, 0xc6, 0x87, 0x13, 0x80, 0xba, 0x6b, 0xd4, 0xeb, 0x07, 0xbe, 0xcb, 0x99, 0xe7, 0x07, 0xb4, + 0xd7, 0x0c, 0x58, 0x33, 0x8c, 0x7a, 0x81, 0xef, 0x85, 0x71, 0x27, 0x5b, 0x29, 0xec, 0x46, 0x73, + 0x42, 0xf8, 0x6a, 0x10, 0xa2, 0x9b, 0x44, 0x29, 0xf3, 0x31, 0x17, 0xaa, 0x03, 0xde, 0x90, 0x01, + 0x51, 0xbf, 0x1f, 0xd1, 0x66, 0xc7, 0x4b, 0xb0, 0x99, 0x70, 0x8f, 0xa7, 0x89, 0x28, 0x5a, 0x2e, + 0xb4, 0xdb, 0xc9, 0x5e, 0xd4, 0x8b, 0xe4, 0xb2, 0x29, 0x56, 0xda, 0x7a, 0x75, 0x52, 0xb1, 0x41, + 0xc2, 0x59, 0xd0, 0x49, 0x39, 0x76, 0xe3, 0x4e, 0x7e, 0xe7, 0x0a, 0x0f, 0x15, 0x68, 0xfd, 0x6d, + 0xc0, 0xdc, 0xcd, 0x94, 0xfa, 0x2b, 0xd1, 0x8d, 0x01, 0xfa, 0x29, 0x47, 0x72, 0x1a, 0x2a, 0xab, + 0x29, 0xf5, 0x5d, 0xea, 0xf5, 0xb1, 0x6e, 0x9c, 0x35, 0xce, 0x57, 0x9c, 0xb2, 0x30, 0xdc, 0xf1, + 0xfa, 0x48, 0x1c, 0x00, 0x8f, 0xf5, 0xdc, 0xc7, 0x5e, 0x98, 0x62, 0x52, 0x2f, 0x9e, 0x2d, 0x9d, + 0xaf, 0x2e, 0x5c, 0xb4, 0xf7, 0xa0, 0xd1, 0x1e, 0x03, 0xb7, 0x3f, 0x61, 0xbd, 0x7b, 0x22, 0xd6, + 0xa9, 0x78, 0x7a, 0x95, 0x10, 0x1b, 0x4e, 0x44, 0x29, 0x8f, 0x53, 0xee, 0x72, 0xaf, 0x13, 0xa2, + 0x1b, 0x33, 0x5c, 0x0d, 0x06, 0xf5, 0x92, 0x4c, 0x7d, 0x5c, 0x1d, 0xad, 0x88, 0x93, 0xb6, 0x3c, + 0x68, 0x5c, 0x82, 0x72, 0x06, 0x43, 0x08, 0xcc, 0xe4, 0xea, 0x94, 0x6b, 0x72, 0x12, 0x8e, 0xc8, + 0xfa, 0xea, 0x45, 0x69, 0x54, 0x1b, 0xeb, 0x8f, 0x19, 0x98, 0xbd, 0x1e, 0xd1, 0xd5, 0xa0, 0x97, + 0x90, 0xa7, 0x06, 0x9c, 0x8c, 0x38, 0x86, 0x2e, 0xd2, 0x6e, 0x1c, 0x05, 0x94, 0xbb, 0xbe, 0x3c, + 0x91, 0x30, 0xd5, 0x85, 0xab, 0x7b, 0x7e, 0x90, 0x06, 0xb1, 0xbf, 0x58, 0xc1, 0xf0, 0x86, 0x8e, + 0x57, 0xb6, 0xd6, 0x2b, 0xc3, 0x8d, 0x33, 0x64, 0xb7, 0xdd, 0x21, 0x22, 0xd9, 0xb8, 0x8d, 0xdc, + 0x83, 0xb9, 0x38, 0x4c, 0x7b, 0x01, 0xcd, 0x72, 0x17, 0x65, 
0xee, 0xf7, 0xa7, 0xca, 0xdd, 0x96, + 0x91, 0x1a, 0xbd, 0x16, 0xe7, 0x76, 0x8d, 0xa7, 0x45, 0x98, 0x50, 0x02, 0x39, 0x05, 0xa5, 0x94, + 0x85, 0x8a, 0xa7, 0xd6, 0xec, 0x70, 0xe3, 0x4c, 0xe9, 0xae, 0x73, 0xdb, 0x11, 0x36, 0xf2, 0x0d, + 0xcc, 0x3e, 0x44, 0xaf, 0x8b, 0x2c, 0x13, 0x74, 0xf1, 0x90, 0xdf, 0x6f, 0x2f, 0x29, 0x98, 0x1b, + 0x94, 0xb3, 0x35, 0x27, 0x03, 0x25, 0x0d, 0x28, 0x07, 0x34, 0x41, 0x3f, 0x65, 0x28, 0x45, 0x2d, + 0x3b, 0xdb, 0x7b, 0x52, 0x87, 0x59, 0x1e, 0xf4, 0x31, 0x4a, 0x79, 0x7d, 0xe6, 0xac, 0x71, 0xbe, + 0xe4, 0x64, 0xdb, 0xc6, 0x35, 0xa8, 0xe5, 0xe1, 0xc8, 0x3c, 0x94, 0xbe, 0xc5, 0x35, 0x2d, 0xb4, + 0x58, 0x4e, 0xd6, 0xf9, 0x5a, 0xf1, 0x03, 0xa3, 0xe1, 0x40, 0x2d, 0xcf, 0x10, 0xb1, 0x60, 0x2e, + 0xe1, 0x1e, 0xe3, 0xae, 0x00, 0x77, 0x69, 0x22, 0x51, 0x4a, 0x4e, 0x55, 0x1a, 0x57, 0x82, 0x3e, + 0xde, 0x49, 0x88, 0x09, 0x55, 0xa4, 0xdd, 0x6d, 0x8f, 0xa2, 0xf4, 0xa8, 0x20, 0xed, 0xaa, 0x73, + 0xeb, 0xd7, 0x22, 0xd4, 0xbe, 0x4c, 0x91, 0xad, 0x39, 0xf8, 0x28, 0xc5, 0x84, 0x93, 0x87, 0xf0, + 0xb2, 0x6e, 0x60, 0x57, 0x93, 0xe3, 0x8a, 0x46, 0xc5, 0xfa, 0x11, 0x29, 0xe4, 0xa5, 0x09, 0x24, + 0x8e, 0x75, 0xa4, 0x7d, 0x5b, 0x45, 0xb7, 0xd5, 0xe1, 0xb2, 0x88, 0x75, 0x4e, 0x84, 0xbb, 0x8d, + 0xa2, 0x23, 0x1f, 0x89, 0xcc, 0x6e, 0xc2, 0x59, 0xd6, 0x91, 0xd2, 0xb0, 0xcc, 0x19, 0xf9, 0x0c, + 0x00, 0x07, 0xe8, 0xbb, 0xa2, 0x45, 0x93, 0x7a, 0x49, 0x0a, 0x78, 0x61, 0xfa, 0x8e, 0x74, 0x2a, + 0x22, 0x5a, 0x98, 0x12, 0xf2, 0x11, 0xcc, 0xaa, 0xbb, 0x98, 0x48, 0x31, 0xaa, 0x0b, 0xe7, 0xa6, + 0xb9, 0x08, 0x4e, 0x16, 0x74, 0x6b, 0xa6, 0x5c, 0x9c, 0x2f, 0x59, 0x3f, 0x18, 0x30, 0xa7, 0x89, + 0x4a, 0xe2, 0x88, 0x26, 0x48, 0xde, 0x86, 0xa3, 0xea, 0x09, 0xd3, 0xfd, 0x75, 0x42, 0xc0, 0x66, + 0xaf, 0x9b, 0xbd, 0x2c, 0x17, 0x8e, 0x76, 0x21, 0x8b, 0x30, 0x23, 0x52, 0xe8, 0x76, 0x78, 0x6f, + 0x5f, 0x16, 0x17, 0x47, 0x3b, 0x41, 0x9a, 0x23, 0xa3, 0xad, 0xdf, 0x8b, 0xf0, 0xea, 0xf5, 0xa8, + 0x1f, 0x07, 0x21, 0x7e, 0x9e, 0x72, 0x8f, 0x07, 0x11, 0x4d, 0xfe, 0x17, 0xee, 0x05, 0xc2, 0x59, + 
0x6f, 0xc2, 0xfc, 0x22, 0x86, 0xc8, 0x71, 0x85, 0x79, 0x3e, 0xca, 0x8e, 0x9e, 0xf4, 0xb2, 0x5a, + 0x0f, 0xa0, 0xa6, 0x62, 0xef, 0xc6, 0x5d, 0xf1, 0x7d, 0x53, 0xf6, 0x24, 0x39, 0x07, 0xc7, 0xbc, + 0x1e, 0x52, 0xee, 0xc6, 0x51, 0x57, 0xcd, 0x15, 0xf5, 0xb8, 0xd7, 0xa4, 0xb5, 0x1d, 0x75, 0xc5, + 0x6c, 0xb1, 0x2e, 0x67, 0x55, 0xdc, 0x0c, 0x42, 0x5c, 0x96, 0x53, 0x92, 0xbc, 0x0e, 0xb5, 0x5e, + 0x18, 0x75, 0xdc, 0xd8, 0xe3, 0x1c, 0x19, 0xd5, 0xa9, 0xaa, 0xc2, 0xd6, 0x56, 0x26, 0x6b, 0xab, + 0x04, 0x2f, 0xed, 0x90, 0x9a, 0xdc, 0x87, 0x23, 0x62, 0x44, 0xa3, 0xbe, 0x45, 0xad, 0x49, 0x92, + 0x8e, 0x8f, 0x72, 0x3b, 0x60, 0x76, 0x36, 0x8f, 0x47, 0x2c, 0x2c, 0x62, 0x1c, 0x46, 0x6b, 0x7d, + 0xa4, 0x7c, 0xa9, 0xe0, 0x28, 0x48, 0xf2, 0x00, 0x8e, 0x77, 0x65, 0x99, 0x32, 0x54, 0xf9, 0xc9, + 0xef, 0xa9, 0x2e, 0xbc, 0xbb, 0x27, 0xed, 0x3b, 0x29, 0x5e, 0x2a, 0x38, 0xf3, 0xdd, 0x9d, 0xb4, + 0xb7, 0x61, 0x4e, 0xa9, 0xe2, 0xa6, 0x92, 0x63, 0x2d, 0xe8, 0x5b, 0x53, 0x08, 0xaa, 0x44, 0x59, + 0x2a, 0x38, 0x35, 0x3f, 0x2f, 0xd2, 0x57, 0x50, 0xcd, 0xfd, 0xee, 0xd0, 0x97, 0xfc, 0xca, 0x04, + 0xbc, 0x9c, 0x97, 0x60, 0x63, 0xa4, 0xc2, 0x18, 0x0b, 0xb0, 0x3a, 0x52, 0xe7, 0x6b, 0x20, 0x9a, + 0x8a, 0x7c, 0x86, 0xa3, 0x53, 0x73, 0x31, 0x4a, 0x31, 0xe2, 0x62, 0x64, 0x6b, 0x01, 0x94, 0xfb, + 0x5a, 0x51, 0xeb, 0x67, 0x03, 0xea, 0xbb, 0x1b, 0xfa, 0x30, 0x0f, 0xcc, 0x2d, 0xa8, 0x64, 0xa8, + 0xd9, 0xc0, 0x7b, 0x67, 0x1f, 0x76, 0xc7, 0xd2, 0x3a, 0xa3, 0x70, 0xeb, 0x17, 0x03, 0x4e, 0x7d, + 0x8a, 0x14, 0x99, 0xc7, 0x51, 0xcc, 0xc3, 0x65, 0x9f, 0x05, 0x31, 0xdf, 0xf7, 0xa1, 0x31, 0xfe, + 0xeb, 0x87, 0xe6, 0x35, 0x80, 0x78, 0x10, 0xba, 0x89, 0x4c, 0xaf, 0x7b, 0xaf, 0x12, 0x0f, 0x74, + 0x3d, 0xd6, 0x77, 0xd0, 0x98, 0x54, 0xe5, 0x61, 0xd8, 0x6b, 0x42, 0x55, 0xfe, 0x72, 0xca, 0xa7, + 0x6a, 0x1d, 0x1b, 0x6e, 0x9c, 0x81, 0x1c, 0x32, 0x08, 0x17, 0xb5, 0x5e, 0x78, 0x52, 0x82, 0x63, + 0x59, 0xad, 0xea, 0xc7, 0x37, 0x41, 0xf1, 0x8c, 0x48, 0x4e, 0xe5, 0x9c, 0x20, 0x7b, 0x5f, 0xee, + 0xfc, 0xd0, 0x6d, 0x5c, 0x98, 0xc6, 
0x55, 0x7f, 0xd7, 0xf7, 0x30, 0xbf, 0xf3, 0xc6, 0x90, 0x4b, + 0x07, 0x51, 0x3a, 0x9b, 0x18, 0x8d, 0xcb, 0x07, 0x8c, 0xd2, 0x05, 0xfc, 0x68, 0x00, 0xd9, 0xcd, + 0x3b, 0xb9, 0xb2, 0x27, 0xda, 0x0b, 0xaf, 0x53, 0xe3, 0xea, 0x81, 0xe3, 0x54, 0x1d, 0xad, 0x8f, + 0xd7, 0x37, 0xcd, 0xc2, 0xb3, 0x4d, 0xb3, 0xf0, 0x7c, 0xd3, 0x34, 0x9e, 0x0c, 0x4d, 0xe3, 0xb7, + 0xa1, 0x69, 0xfc, 0x39, 0x34, 0x8d, 0xf5, 0xa1, 0x69, 0xfc, 0x35, 0x34, 0x8d, 0x7f, 0x86, 0x66, + 0xe1, 0xf9, 0xd0, 0x34, 0x7e, 0xda, 0x32, 0x0b, 0xeb, 0x5b, 0x66, 0xe1, 0xd9, 0x96, 0x59, 0xb8, + 0x5f, 0xd9, 0xc6, 0xee, 0x1c, 0x95, 0xff, 0x2b, 0x5c, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x30, + 0x6b, 0x7a, 0x93, 0x5c, 0x0d, 0x00, 0x00, } func (this *FuncToExecute) Equal(that interface{}) bool { @@ -1257,6 +1333,30 @@ func (this *ConfigUpdate) Equal(that interface{}) bool { } return true } +func (this *DeleteFileSource) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DeleteFileSource) + if !ok { + that2, ok := that.(DeleteFileSource) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.GlobPattern != that1.GlobPattern { + return false + } + return true +} func (this *CompileMutation) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1359,6 +1459,54 @@ func (this *CompileMutation_ConfigUpdate) Equal(that interface{}) bool { } return true } +func (this *CompileMutation_FileSource) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CompileMutation_FileSource) + if !ok { + that2, ok := that.(CompileMutation_FileSource) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.FileSource.Equal(that1.FileSource) { + return false + } + return true +} +func (this *CompileMutation_DeleteFileSource) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CompileMutation_DeleteFileSource) + if !ok { + that2, ok := that.(CompileMutation_DeleteFileSource) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.DeleteFileSource.Equal(that1.DeleteFileSource) { + return false + } + return true +} func (this *CompileMutationsResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1596,11 +1744,21 @@ func (this *ConfigUpdate) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *DeleteFileSource) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&plannerpb.DeleteFileSource{") + s = append(s, "GlobPattern: "+fmt.Sprintf("%#v", this.GlobPattern)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *CompileMutation) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 9) s = append(s, "&plannerpb.CompileMutation{") if this.Mutation != nil { s = append(s, "Mutation: "+fmt.Sprintf("%#v", this.Mutation)+",\n") @@ -1632,6 +1790,22 @@ func (this *CompileMutation_ConfigUpdate) GoString() string { `ConfigUpdate:` + fmt.Sprintf("%#v", this.ConfigUpdate) + `}`}, ", ") return s } +func (this *CompileMutation_FileSource) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&plannerpb.CompileMutation_FileSource{` + + `FileSource:` + fmt.Sprintf("%#v", this.FileSource) + `}`}, ", ") + return s +} +func (this *CompileMutation_DeleteFileSource) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&plannerpb.CompileMutation_DeleteFileSource{` + + `DeleteFileSource:` + fmt.Sprintf("%#v", this.DeleteFileSource) + `}`}, ", ") + return s +} func (this *CompileMutationsResponse) GoString() string { if this == nil { 
return "nil" @@ -2323,6 +2497,36 @@ func (m *ConfigUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *DeleteFileSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteFileSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteFileSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.GlobPattern) > 0 { + i -= len(m.GlobPattern) + copy(dAtA[i:], m.GlobPattern) + i = encodeVarintService(dAtA, i, uint64(len(m.GlobPattern))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *CompileMutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2418,6 +2622,48 @@ func (m *CompileMutation_ConfigUpdate) MarshalToSizedBuffer(dAtA []byte) (int, e } return len(dAtA) - i, nil } +func (m *CompileMutation_FileSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompileMutation_FileSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FileSource != nil { + { + size, err := m.FileSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *CompileMutation_DeleteFileSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CompileMutation_DeleteFileSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DeleteFileSource != nil { + { + size, err := m.DeleteFileSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} func (m *CompileMutationsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2766,6 +3012,19 @@ func (m *ConfigUpdate) Size() (n int) { return n } +func (m *DeleteFileSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GlobPattern) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + func (m *CompileMutation) Size() (n int) { if m == nil { return 0 @@ -2814,6 +3073,30 @@ func (m *CompileMutation_ConfigUpdate) Size() (n int) { } return n } +func (m *CompileMutation_FileSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FileSource != nil { + l = m.FileSource.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} +func (m *CompileMutation_DeleteFileSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeleteFileSource != nil { + l = m.DeleteFileSource.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} func (m *CompileMutationsResponse) Size() (n int) { if m == nil { return 0 @@ -3015,6 +3298,16 @@ func (this *ConfigUpdate) String() string { }, "") return s } +func (this *DeleteFileSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteFileSource{`, + `GlobPattern:` + fmt.Sprintf("%v", this.GlobPattern) + `,`, + `}`, + }, "") + return s +} func (this *CompileMutation) String() string { if this == nil { return "nil" @@ -3055,6 +3348,26 @@ func (this *CompileMutation_ConfigUpdate) String() string { }, "") return s } +func (this *CompileMutation_FileSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CompileMutation_FileSource{`, + `FileSource:` + strings.Replace(fmt.Sprintf("%v", this.FileSource), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*CompileMutation_DeleteFileSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CompileMutation_DeleteFileSource{`, + `DeleteFileSource:` + strings.Replace(fmt.Sprintf("%v", this.DeleteFileSource), "DeleteFileSource", "DeleteFileSource", 1) + `,`, + `}`, + }, "") + return s +} func (this *CompileMutationsResponse) String() string { if this == nil { return "nil" @@ -4547,6 +4860,88 @@ func (m *ConfigUpdate) Unmarshal(dAtA []byte) error { } return nil } +func (m *DeleteFileSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteFileSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteFileSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobPattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobPattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *CompileMutation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4681,6 +5076,76 @@ func (m *CompileMutation) Unmarshal(dAtA []byte) error { } m.Mutation = &CompileMutation_ConfigUpdate{v} iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ir.FileSourceDeployment{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Mutation = &CompileMutation_FileSource{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteFileSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DeleteFileSource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Mutation = &CompileMutation_DeleteFileSource{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipService(dAtA[iNdEx:]) diff --git a/src/carnot/planner/plannerpb/service.proto b/src/carnot/planner/plannerpb/service.proto index a9b33d825f8..0c3b63aad94 100644 --- a/src/carnot/planner/plannerpb/service.proto +++ b/src/carnot/planner/plannerpb/service.proto @@ -23,6 +23,7 @@ package px.carnot.planner.plannerpb; option go_package = "plannerpb"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; +import "src/carnot/planner/file_source/ir/logical.proto"; import "src/common/base/statuspb/status.proto"; import "gogoproto/gogo.proto"; import "src/carnot/planner/distributedpb/distributed_plan.proto"; @@ -129,6 +130,11 @@ message ConfigUpdate { string agent_pod_name = 3; } +message DeleteFileSource { + // The glob pattern to use to find files to read. Also doubles as the name of the file source. + string glob_pattern = 1; +} + // The definition of a mutation to perfom on Vizier. Mutations include operations // that add and delete tables to the database. message CompileMutation { @@ -140,6 +146,10 @@ message CompileMutation { DeleteTracepoint delete_tracepoint = 3; // Mutation that sets a config. 
ConfigUpdate config_update = 4; + // Mutation that adds a file source/poller + carnot.planner.file_source.ir.FileSourceDeployment file_source = 5; + // Mutation that deletes a file source/poller + DeleteFileSource delete_file_source = 6; } } diff --git a/src/carnot/planner/probes/BUILD.bazel b/src/carnot/planner/probes/BUILD.bazel index f9fee130715..bd98b0fb8d6 100644 --- a/src/carnot/planner/probes/BUILD.bazel +++ b/src/carnot/planner/probes/BUILD.bazel @@ -37,6 +37,7 @@ pl_cc_library( hdrs = ["probes.h"], deps = [ "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", + "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/carnot/planner/objects:cc_library", "//src/common/uuid:cc_library", ], diff --git a/src/carnot/planner/probes/probes.cc b/src/carnot/planner/probes/probes.cc index fbe21a674d5..942abab414c 100644 --- a/src/carnot/planner/probes/probes.cc +++ b/src/carnot/planner/probes/probes.cc @@ -108,6 +108,14 @@ std::vector MutationsIR::Deployments() { return deployments; } +std::vector MutationsIR::FileSourceDeployments() { + std::vector file_source_deployments; + for (size_t i = 0; i < file_source_deployments_.size(); i++) { + file_source_deployments.push_back(file_source_deployments_[i]); + } + return file_source_deployments; +} + std::shared_ptr MutationsIR::StartProbe(const std::string& function_name) { auto tracepoint_ir = std::make_shared(function_name); probes_pool_.push_back(tracepoint_ir); @@ -292,15 +300,35 @@ Status MutationsIR::ToProto(plannerpb::CompileMutationsResponse* pb) { pb->add_mutations()->mutable_delete_tracepoint()->set_name(tracepoint_to_delete); } + for (const auto& file_source_to_delete : FileSourcesToDelete()) { + pb->add_mutations()->mutable_delete_file_source()->set_glob_pattern(file_source_to_delete); + } + for (const auto& update : config_updates_) { *(pb->add_mutations()->mutable_config_update()) = update; } + for (const auto& file_source : file_source_deployments_) { + 
*(pb->add_mutations()->mutable_file_source()) = file_source; + } + return Status::OK(); } void MutationsIR::EndProbe() { current_tracepoint_ = nullptr; } +void MutationsIR::CreateFileSourceDeployment(const std::string& glob_pattern, + const std::string& table_name, int64_t ttl_ns) { + file_source::ir::FileSourceDeployment file_source; + file_source.set_name(glob_pattern); + file_source.set_glob_pattern(glob_pattern); + file_source.set_table_name(table_name); + auto one_sec = std::chrono::duration_cast(std::chrono::seconds(1)); + file_source.mutable_ttl()->set_seconds(ttl_ns / one_sec.count()); + file_source.mutable_ttl()->set_nanos(ttl_ns % one_sec.count()); + file_source_deployments_.push_back(file_source); +} + } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/probes/probes.h b/src/carnot/planner/probes/probes.h index 3578cdb33d6..3d90992402b 100644 --- a/src/carnot/planner/probes/probes.h +++ b/src/carnot/planner/probes/probes.h @@ -23,6 +23,7 @@ #include #include "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.pb.h" +#include "src/carnot/planner/file_source/ir/logical.pb.h" #include "src/carnot/planner/objects/funcobject.h" #include "src/carnot/planner/plannerpb/service.pb.h" #include "src/carnot/planner/probes/label_selector_target.h" @@ -166,6 +167,20 @@ class TracepointIR { std::shared_ptr output_ = nullptr; }; +class FileSourceDeployment { + public: + FileSourceDeployment(const std::string& glob_pattern, const std::string& table_name, + int64_t ttl_ns) + : glob_pattern_(glob_pattern), table_name_(table_name), ttl_ns_(ttl_ns) {} + + Status ToProto(file_source::ir::FileSourceDeployment pb) const; + + private: + std::string glob_pattern_; + std::string table_name_; + int64_t ttl_ns_; +}; + class TracepointDeployment { public: TracepointDeployment(const std::string& trace_name, int64_t ttl_ns) @@ -225,6 +240,10 @@ class MutationsIR { */ std::shared_ptr StartProbe(const std::string& function_name); + 
void CreateFileSourceDeployment(const std::string& glob_pattern, const std::string& table_name, + int64_t ttl_ns); + + void CreateDeleteFileSource(const std::string& glob_pattern); /** * @brief Create a TraceProgram for the MutationsIR w/ the specified UPID. * @@ -331,6 +350,19 @@ class MutationsIR { std::vector Deployments(); + std::vector FileSourceDeployments(); + + /** + * @brief Deletes the file source passed in. + * + * @param file_source_to_delete + */ + void DeleteFileSource(const std::string& file_source_to_delete) { + file_sources_to_delete_.push_back(file_source_to_delete); + } + + const std::vector& FileSourcesToDelete() { return file_sources_to_delete_; } + private: // All the new tracepoints added as part of this mutation. DeploymentSpecs are protobufs because // we only modify these upon inserting the new tracepoint, while the Tracepoint definition is @@ -348,6 +380,9 @@ class MutationsIR { // The updates to internal config that need to be done. std::vector config_updates_; + + std::vector file_source_deployments_; + std::vector file_sources_to_delete_; }; } // namespace compiler diff --git a/src/carnot/planner/probes/tracepoint_generator.cc b/src/carnot/planner/probes/tracepoint_generator.cc index bd2f817b035..3dc23bd2c66 100644 --- a/src/carnot/planner/probes/tracepoint_generator.cc +++ b/src/carnot/planner/probes/tracepoint_generator.cc @@ -28,14 +28,16 @@ #include "src/carnot/planner/probes/probes.h" #include "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.pb.h" +#include "src/carnot/planner/file_source/ir/logical.pb.h" namespace px { namespace carnot { namespace planner { namespace compiler { -StatusOr CompileTracepoint( - std::string_view query) { +namespace { + +StatusOr CompileMutations(std::string_view query) { // Create a compiler state; it doesn't affect the tracepoint compilation. // TODO(oazizi): Try inserting nullptr for registry_info. 
px::carnot::planner::RegistryInfo registry_info; @@ -65,10 +67,22 @@ StatusOr Co if (pb.mutations_size() != 1) { return error::Internal("Unexpected number of mutations"); } + return pb; +} + +} // namespace +StatusOr CompileTracepoint( + std::string_view query) { + PX_ASSIGN_OR_RETURN(auto pb, CompileMutations(query)); return pb.mutations()[0].trace(); } +StatusOr CompileFileSource(std::string_view query) { + PX_ASSIGN_OR_RETURN(auto pb, CompileMutations(query)); + return pb.mutations()[0].file_source(); +} + } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/probes/tracepoint_generator.h b/src/carnot/planner/probes/tracepoint_generator.h index 7cc4a957515..0894d92bbec 100644 --- a/src/carnot/planner/probes/tracepoint_generator.h +++ b/src/carnot/planner/probes/tracepoint_generator.h @@ -33,6 +33,12 @@ namespace compiler { StatusOr CompileTracepoint( std::string_view query); +/** + * Take a file source specification in PXL format, and compiles it to a logical file source + * deployment. 
+ */ +StatusOr CompileFileSource(std::string_view query); + } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/test_utils.h b/src/carnot/planner/test_utils.h index 84a5f94c8fe..8ba8af3aabb 100644 --- a/src/carnot/planner/test_utils.h +++ b/src/carnot/planner/test_utils.h @@ -286,6 +286,95 @@ relation_map { } )proto"; +constexpr char kFileSourceSchema[] = R"proto( +relation_map { + key: "kern.log" + value { + columns { + column_name: "time_" + column_type: TIME64NS + column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_UPID + } + columns { + column_name: "service" + column_type: STRING + column_semantic_type: ST_NONE + } + columns { + column_name: "resp_latency_ns" + column_type: INT64 + column_semantic_type: ST_DURATION_NS + } + mutation_id: "mutation" + } +} +relation_map { + key: "cpu" + value { + columns { + column_name: "count" + column_type: INT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "cpu0" + column_type: FLOAT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "cpu1" + column_type: FLOAT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "cpu2" + column_type: FLOAT64 + column_semantic_type: ST_NONE + } + } +} +relation_map { + key: "process_stats" + value { + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_UPID + } + columns { + column_name: "cpu_ktime_ns" + column_type: INT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "cpu_utime_ns" + column_type: INT64 + column_semantic_type: ST_NONE + } + } +} +relation_map { + key: "only_pem1" + value { + columns { + column_name: "time_" + column_type: TIME64NS + column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_NONE + } + } +} +)proto"; + constexpr char kConnStatsSchema[] = R"proto( relation_map { key: "conn_stats" @@ -1144,6 +1233,229 
@@ schema_info { } )proto"; +constexpr char kThreePEMsOneKelvinAllHasDataStoreDistributedState[] = R"proto( +carnot_info { + query_broker_address: "pem1" + agent_id { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000001 + } + has_grpc_server: false + has_data_store: true + processes_data: true + accepts_remote_sources: false + asid: 123 + table_info { + table: "table" + } +} +carnot_info { + query_broker_address: "pem2" + agent_id { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000002 + } + has_grpc_server: false + has_data_store: true + processes_data: true + accepts_remote_sources: false + asid: 789 + table_info { + table: "table" + } +} +carnot_info { + query_broker_address: "pem3" + agent_id { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000003 + } + has_grpc_server: false + has_data_store: true + processes_data: true + accepts_remote_sources: false + asid: 111 + table_info { + table: "table" + } +} +carnot_info { + query_broker_address: "kelvin" + agent_id { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000004 + } + grpc_address: "1111" + has_grpc_server: true + has_data_store: true + processes_data: true + accepts_remote_sources: true + asid: 456 + ssl_targetname: "kelvin.pl.svc" +} +schema_info { + name: "table" + relation { + columns { + column_name: "time_" + column_type: TIME64NS + column_semantic_type: ST_NONE + } + columns { + column_name: "cpu_cycles" + column_type: INT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_NONE + } + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000001 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000002 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000003 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000004 + } +} +schema_info { + name: "cql_events" + relation { + columns { + column_name: "time_" + 
column_type: TIME64NS + column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_NONE + } + columns { + column_name: "remote_addr" + column_type: STRING + column_semantic_type: ST_NONE + } + columns { + column_name: "remote_port" + column_type: INT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "trace_role" + column_type: INT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "latency" + column_type: INT64 + column_semantic_type: ST_NONE + } + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000001 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000002 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000003 + } +} +schema_info { + name: "http_events" + relation { + columns { + column_name: "time_" + column_type: TIME64NS + column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_NONE + } + columns { + column_name: "local_addr" + column_type: STRING + column_semantic_type: ST_NONE + } + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000001 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000002 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000003 + } +} +schema_info { + name: "process_stats" + relation { + columns { + column_name: "time_" + column_type: TIME64NS + column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_NONE + } + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000001 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000002 + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000003 + } +} +schema_info { + name: "only_pem1" + relation { + columns { + column_name: "time_" + column_type: TIME64NS + 
column_semantic_type: ST_NONE + } + columns { + column_name: "upid" + column_type: UINT128 + column_semantic_type: ST_NONE + } + } + agent_list { + high_bits: 0x0000000100000000 + low_bits: 0x0000000000000001 + } +} +)proto"; + constexpr char kOnePEMOneKelvinDistributedState[] = R"proto( carnot_info { agent_id { diff --git a/src/carnot/planpb/plan.pb.go b/src/carnot/planpb/plan.pb.go index ce6671091c1..ef00e006fa3 100755 --- a/src/carnot/planpb/plan.pb.go +++ b/src/carnot/planpb/plan.pb.go @@ -526,7 +526,8 @@ type Operator struct { // *Operator_UdtfSourceOp // *Operator_EmptySourceOp // *Operator_OTelSinkOp - Op isOperator_Op `protobuf_oneof:"op"` + Op isOperator_Op `protobuf_oneof:"op"` + Context map[string]string `protobuf:"bytes,15,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *Operator) Reset() { *m = Operator{} } @@ -727,6 +728,13 @@ func (m *Operator) GetOTelSinkOp() *OTelExportSinkOperator { return nil } +func (m *Operator) GetContext() map[string]string { + if m != nil { + return m.Context + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Operator) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -3238,6 +3246,7 @@ func init() { proto.RegisterType((*DAG_DAGNode)(nil), "px.carnot.planpb.DAG.DAGNode") proto.RegisterType((*PlanNode)(nil), "px.carnot.planpb.PlanNode") proto.RegisterType((*Operator)(nil), "px.carnot.planpb.Operator") + proto.RegisterMapType((map[string]string)(nil), "px.carnot.planpb.Operator.ContextEntry") proto.RegisterType((*MemorySourceOperator)(nil), "px.carnot.planpb.MemorySourceOperator") proto.RegisterType((*MemorySinkOperator)(nil), "px.carnot.planpb.MemorySinkOperator") proto.RegisterType((*GRPCSourceOperator)(nil), "px.carnot.planpb.GRPCSourceOperator") @@ -3278,213 +3287,216 @@ func init() { func init() { proto.RegisterFile("src/carnot/planpb/plan.proto", fileDescriptor_e5dcfc8666ec3f33) } var fileDescriptor_e5dcfc8666ec3f33 = []byte{ - // 3294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x39, 0x4b, 0x6c, 0x1b, 0xc7, - 0xd9, 0x5c, 0x92, 0xe2, 0xe3, 0xe3, 0x53, 0x63, 0xc9, 0x96, 0x69, 0x9b, 0x72, 0x18, 0xfb, 0xb7, - 0xe2, 0x3f, 0xa1, 0x6c, 0xd9, 0xf1, 0xef, 0x38, 0xce, 0x9f, 0x50, 0x12, 0x25, 0x51, 0x91, 0x44, - 0x75, 0x44, 0x25, 0x4d, 0x1b, 0x74, 0xb1, 0xe2, 0x8e, 0xd6, 0x1b, 0x93, 0xbb, 0x9b, 0x7d, 0xd8, - 0x52, 0x80, 0xa2, 0x29, 0x7a, 0xe9, 0x21, 0x87, 0x1e, 0x7a, 0x28, 0x7a, 0x6f, 0x91, 0x4b, 0x8b, - 0x1c, 0x7a, 0xec, 0xa1, 0x05, 0x0a, 0xa4, 0x87, 0x22, 0x70, 0x7b, 0xca, 0xc9, 0x88, 0x95, 0x8b, - 0x0f, 0x45, 0x91, 0xde, 0x7b, 0x28, 0xe6, 0xb1, 0xe4, 0x52, 0xbb, 0xb2, 0x94, 0xb4, 0x28, 0xd0, - 0x83, 0xc4, 0x9d, 0xef, 0x35, 0xdf, 0x7b, 0xbe, 0xd9, 0x85, 0xf3, 0x8e, 0xdd, 0x9d, 0xed, 0x2a, - 0xb6, 0x61, 0xba, 0xb3, 0x56, 0x4f, 0x31, 0xac, 0x1d, 0xf6, 0x53, 0xb7, 0x6c, 0xd3, 0x35, 0x51, - 0xd9, 0xda, 0xab, 0x73, 0x64, 0x9d, 0x23, 0x2b, 0x13, 0x9a, 0xa9, 0x99, 0x0c, 0x39, 0x4b, 0x9f, - 0x38, 0x5d, 0xa5, 0xaa, 0x99, 0xa6, 0xd6, 0x23, 0xb3, 0x6c, 0xb5, 0xe3, 0xed, 0xce, 0x3e, 0xb4, - 0x15, 0xcb, 
0x22, 0xb6, 0x23, 0xf0, 0xd3, 0x74, 0x17, 0xc5, 0xd2, 0x39, 0xc1, 0xac, 0xe7, 0xe9, - 0xaa, 0xb5, 0xc3, 0x7e, 0x04, 0xc1, 0x25, 0x4a, 0xe0, 0xdc, 0x53, 0x6c, 0xa2, 0xce, 0xba, 0xfb, - 0x16, 0x71, 0xf8, 0x7f, 0x6b, 0x87, 0xff, 0x72, 0xaa, 0xda, 0x0f, 0x25, 0xc8, 0x6d, 0xf6, 0x14, - 0xa3, 0x6d, 0xb9, 0xba, 0x69, 0x38, 0x68, 0x0a, 0xd2, 0x64, 0xcf, 0xea, 0x29, 0xba, 0x31, 0x15, - 0xbf, 0x28, 0xcd, 0x64, 0xb0, 0xbf, 0xa4, 0x18, 0xc5, 0x50, 0x7a, 0xfb, 0x1f, 0x90, 0xa9, 0x04, - 0xc7, 0x88, 0x25, 0xba, 0x0d, 0x67, 0xfb, 0xca, 0x9e, 0x6c, 0x7a, 0xae, 0xe5, 0xb9, 0xb2, 0x6d, - 0x3e, 0x74, 0x64, 0x8b, 0xd8, 0xb2, 0xab, 0xec, 0xf4, 0xc8, 0x54, 0xf2, 0xa2, 0x34, 0x93, 0xc0, - 0x93, 0x7d, 0x65, 0xaf, 0xcd, 0xf0, 0xd8, 0x7c, 0xe8, 0x6c, 0x12, 0xbb, 0x43, 0x91, 0xab, 0xc9, - 0x8c, 0x54, 0x8e, 0xd7, 0x9e, 0x24, 0x20, 0x49, 0x75, 0x40, 0x57, 0x20, 0xa1, 0x2a, 0xda, 0x94, - 0x74, 0x51, 0x9a, 0xc9, 0xcd, 0x4d, 0xd6, 0x0f, 0x7b, 0xaa, 0xbe, 0xd8, 0x58, 0xc6, 0x94, 0x02, - 0xdd, 0x84, 0x31, 0xc3, 0x54, 0x89, 0x33, 0x15, 0xbf, 0x98, 0x98, 0xc9, 0xcd, 0x55, 0xc3, 0xa4, - 0x54, 0xde, 0x92, 0xad, 0x68, 0x7d, 0x62, 0xb8, 0x98, 0x13, 0xa3, 0x37, 0x20, 0x4f, 0xb1, 0xb2, - 0xc9, 0x6d, 0x65, 0xaa, 0xe5, 0xe6, 0x2e, 0x44, 0x33, 0x0b, 0x87, 0xe0, 0x9c, 0x15, 0xf0, 0xce, - 0x16, 0x20, 0xdd, 0xe8, 0x9a, 0x7d, 0xdd, 0xd0, 0x64, 0x45, 0x23, 0x86, 0x2b, 0xeb, 0xaa, 0x33, - 0x35, 0xc6, 0x94, 0x28, 0x51, 0x39, 0x3c, 0x0c, 0xf5, 0xed, 0xed, 0xd6, 0xe2, 0xfc, 0xc4, 0xc1, - 0xe3, 0xe9, 0x72, 0x4b, 0x90, 0x37, 0x28, 0x75, 0x6b, 0xd1, 0xc1, 0x65, 0x7d, 0x04, 0xa2, 0x3a, - 0xc8, 0x83, 0x0b, 0x64, 0x8f, 0x74, 0x3d, 0xba, 0x85, 0xec, 0xb8, 0x8a, 0xeb, 0x39, 0xb2, 0x4a, - 0x1c, 0x57, 0x37, 0x14, 0xae, 0x67, 0x8a, 0xc9, 0xbf, 0x1e, 0xad, 0x67, 0xbd, 0xe9, 0xf3, 0x6e, - 0x31, 0xd6, 0xc5, 0x21, 0x27, 0x3e, 0x47, 0x8e, 0xc4, 0x39, 0x95, 0x5d, 0xa8, 0x1c, 0xcd, 0x8a, - 0x9e, 0x83, 0xbc, 0x66, 0x5b, 0x5d, 0x59, 0x51, 0x55, 0x9b, 0x38, 0x0e, 0x8b, 0x49, 0x16, 0xe7, - 0x28, 0xac, 0xc1, 0x41, 0xe8, 0x32, 0x14, 0x1d, 
0xa7, 0x27, 0xbb, 0x8a, 0xad, 0x11, 0xd7, 0x50, - 0xfa, 0x84, 0x65, 0x4c, 0x16, 0x17, 0x1c, 0xa7, 0xd7, 0x19, 0x00, 0x57, 0x93, 0x99, 0x44, 0x39, - 0x59, 0xdb, 0x87, 0x7c, 0x30, 0x24, 0xa8, 0x08, 0x71, 0x5d, 0x65, 0x52, 0x93, 0x38, 0xae, 0xab, - 0x7e, 0xe8, 0xe3, 0xc7, 0x86, 0xfe, 0x9a, 0x1f, 0xfa, 0x04, 0xf3, 0x4a, 0x25, 0xda, 0x2b, 0x1b, - 0xa6, 0x4a, 0x44, 0xd8, 0x6b, 0xbf, 0x90, 0x20, 0xb1, 0xd8, 0x58, 0x46, 0x37, 0x7c, 0x4e, 0x89, - 0x71, 0x5e, 0x88, 0xdc, 0x84, 0xfe, 0x05, 0x98, 0x2b, 0x3a, 0xa4, 0x05, 0x24, 0xa4, 0x32, 0xb5, - 0xdf, 0xb4, 0x5d, 0xa2, 0xca, 0x96, 0x62, 0x13, 0xc3, 0xa5, 0x09, 0x95, 0x98, 0x49, 0xe2, 0x02, - 0x87, 0x6e, 0x72, 0x20, 0xba, 0x02, 0x25, 0x41, 0xd6, 0xbd, 0xa7, 0xf7, 0x54, 0x9b, 0x18, 0x4c, - 0xf5, 0x24, 0x16, 0xdc, 0x0b, 0x02, 0x5a, 0x5b, 0x82, 0x8c, 0xaf, 0x7a, 0x68, 0xaf, 0xab, 0x10, - 0x37, 0x2d, 0xe1, 0x9d, 0x08, 0x93, 0xdb, 0x16, 0xb1, 0x15, 0xd7, 0xb4, 0x71, 0xdc, 0xb4, 0x6a, - 0x3f, 0xca, 0x40, 0xc6, 0x07, 0xa0, 0xff, 0x83, 0xb4, 0x69, 0xc9, 0xb4, 0xe2, 0x99, 0xb4, 0x62, - 0x54, 0xad, 0xf8, 0xc4, 0x9d, 0x7d, 0x8b, 0xe0, 0x94, 0x69, 0xd1, 0x5f, 0xb4, 0x06, 0x85, 0x3e, - 0xe9, 0xcb, 0x8e, 0xe9, 0xd9, 0x5d, 0x22, 0x0f, 0x36, 0xff, 0x9f, 0x30, 0xfb, 0x3a, 0xe9, 0x9b, - 0xf6, 0xfe, 0x16, 0x23, 0xf4, 0x45, 0xad, 0xc4, 0x70, 0xae, 0x4f, 0xfa, 0x3e, 0x10, 0xdd, 0x82, - 0x54, 0x5f, 0xb1, 0xa8, 0x98, 0xc4, 0x51, 0x45, 0xb7, 0xae, 0x58, 0x01, 0xee, 0xb1, 0x3e, 0x5d, - 0xa2, 0xbb, 0x90, 0x52, 0x34, 0x8d, 0xf2, 0xf1, 0x62, 0x7d, 0x3e, 0xcc, 0xd7, 0xd0, 0x34, 0x9b, - 0x68, 0x8a, 0x1b, 0xdc, 0x7b, 0x4c, 0xd1, 0xb4, 0xb6, 0x85, 0x96, 0x20, 0xc7, 0x6c, 0xd0, 0x8d, - 0xfb, 0x54, 0xc4, 0x18, 0x13, 0x71, 0xe9, 0x48, 0x0b, 0x74, 0xe3, 0x7e, 0x40, 0x46, 0x96, 0xea, - 0xcf, 0x40, 0xe8, 0x75, 0xc8, 0xee, 0xea, 0x3d, 0x97, 0xd8, 0x54, 0x4a, 0x8a, 0x49, 0xb9, 0x18, - 0x96, 0xb2, 0xc4, 0x48, 0x02, 0x12, 0x32, 0xbb, 0x02, 0x82, 0xee, 0x42, 0xa6, 0xa7, 0xf7, 0x75, - 0x97, 0xf2, 0xa7, 0x19, 0xff, 0x74, 0x98, 0x7f, 0x8d, 0x52, 0x04, 0xd8, 0xd3, 0x3d, 
0x0e, 0xa0, - 0xdc, 0x9e, 0x41, 0x9b, 0x83, 0x69, 0x4d, 0x65, 0x8e, 0xe2, 0xde, 0xa6, 0x14, 0x41, 0x6e, 0x8f, - 0x03, 0xd0, 0xf7, 0xa0, 0xc8, 0x2a, 0x79, 0x18, 0xc9, 0xec, 0x51, 0x7e, 0x58, 0xc6, 0x9b, 0x0b, - 0xa3, 0x71, 0x9c, 0x2f, 0x1f, 0x3c, 0x9e, 0xce, 0x07, 0xe1, 0x2b, 0x31, 0xcc, 0x3a, 0xc3, 0x20, - 0xb4, 0x6f, 0x8b, 0x4e, 0xe1, 0x7b, 0xf9, 0x29, 0x37, 0xb0, 0x76, 0x84, 0xf8, 0x80, 0x93, 0xe7, - 0x8b, 0x07, 0x8f, 0xa7, 0x61, 0x08, 0x5d, 0x89, 0x61, 0x60, 0xa2, 0xb9, 0xd7, 0x5f, 0x81, 0xf4, - 0x7b, 0xa6, 0xce, 0xac, 0xce, 0x31, 0x91, 0x11, 0xa9, 0xbb, 0x6a, 0xea, 0x41, 0xa3, 0x53, 0xef, - 0xb1, 0x35, 0x5a, 0x83, 0xa2, 0xa7, 0xba, 0xbb, 0x01, 0x9b, 0xf3, 0x47, 0xd9, 0xbc, 0xbd, 0xd8, - 0x59, 0x0a, 0xe5, 0x6e, 0x9e, 0x72, 0x0f, 0x2c, 0x6c, 0x43, 0x89, 0xf4, 0x2d, 0x77, 0x3f, 0x20, - 0xae, 0xc0, 0xc4, 0x5d, 0x0e, 0x8b, 0x6b, 0x52, 0xc2, 0x90, 0xbc, 0x02, 0x09, 0x82, 0xd1, 0xbb, - 0x90, 0x37, 0x5d, 0xd2, 0x1b, 0xb8, 0xac, 0xc8, 0xa4, 0xcd, 0x44, 0x54, 0x66, 0x87, 0xf4, 0x9a, - 0x7b, 0x96, 0x69, 0xbb, 0x61, 0xbf, 0x51, 0xdc, 0xd0, 0x6f, 0x54, 0x1e, 0x5f, 0xcd, 0x27, 0x69, - 0xaf, 0xa8, 0xfd, 0x39, 0x0e, 0x13, 0x51, 0x95, 0x89, 0x10, 0x24, 0x59, 0xb3, 0xe6, 0x1d, 0x9d, - 0x3d, 0xa3, 0x69, 0xc8, 0x75, 0xcd, 0x9e, 0xd7, 0x37, 0x64, 0x5d, 0xdd, 0xe3, 0xa7, 0x6a, 0x02, - 0x03, 0x07, 0xb5, 0xd4, 0x3d, 0x87, 0x1e, 0x07, 0x82, 0x80, 0xd2, 0xf3, 0xe6, 0x9b, 0xc5, 0x82, - 0x69, 0x83, 0x82, 0xd0, 0xcb, 0x03, 0x12, 0x36, 0x5f, 0xb0, 0x66, 0x58, 0x9c, 0x43, 0xd4, 0x28, - 0x3e, 0x70, 0x2c, 0x2a, 0xae, 0xc2, 0x5a, 0x8c, 0x60, 0xa3, 0xcf, 0x0e, 0xba, 0x03, 0xe0, 0xb8, - 0x8a, 0xed, 0xca, 0xae, 0xde, 0x27, 0xa2, 0x44, 0xcf, 0xd5, 0xf9, 0xf0, 0x53, 0xf7, 0x87, 0x9f, - 0x7a, 0xcb, 0x70, 0x6f, 0xdd, 0x7c, 0x4b, 0xe9, 0x79, 0x04, 0x67, 0x19, 0x79, 0x47, 0xef, 0xd3, - 0xc1, 0x23, 0xeb, 0xb8, 0xb4, 0xbd, 0x51, 0xd6, 0xd4, 0xf1, 0xac, 0x19, 0x4a, 0xcd, 0x38, 0x4f, - 0x43, 0x8a, 0x8d, 0x27, 0x2e, 0x2b, 0xc7, 0x2c, 0x16, 0x2b, 0x74, 0x9e, 0x4a, 0xb4, 0x89, 0x42, - 0x0f, 0x68, 0x56, 0x6b, 
0x19, 0x3c, 0x04, 0xd4, 0x3e, 0x93, 0x00, 0x85, 0x7b, 0x45, 0xa4, 0x47, - 0x0f, 0x7b, 0x23, 0x7e, 0x32, 0x6f, 0x9c, 0xc0, 0xcf, 0xab, 0x30, 0x29, 0x48, 0x1c, 0xd2, 0x57, - 0x0c, 0x57, 0xef, 0x8e, 0x38, 0xfc, 0xf4, 0x70, 0x8b, 0x2d, 0x81, 0x67, 0xdb, 0x9c, 0xe2, 0x4c, - 0x41, 0x98, 0x53, 0x33, 0x00, 0x85, 0x6b, 0x3e, 0xa4, 0xbb, 0xf4, 0xcd, 0x74, 0x8f, 0x87, 0x74, - 0xaf, 0x7d, 0x96, 0x84, 0xf2, 0xe1, 0x2e, 0xc0, 0x06, 0xcb, 0x91, 0x29, 0xc3, 0x5f, 0xa2, 0xdb, - 0xa3, 0xad, 0x4b, 0x57, 0xd9, 0xe9, 0x91, 0x3c, 0xdc, 0x94, 0x5a, 0x8b, 0xa3, 0x4d, 0xa9, 0xa5, - 0xa2, 0x2d, 0xc8, 0x8b, 0x71, 0x74, 0x38, 0x85, 0xe6, 0xe6, 0xea, 0xc7, 0xf7, 0xa4, 0x3a, 0x26, - 0x8e, 0xd7, 0x73, 0xd9, 0x78, 0x4a, 0x0f, 0x31, 0x2e, 0x85, 0x2d, 0x91, 0x06, 0xa8, 0x6b, 0x1a, - 0x06, 0xe9, 0xba, 0xbc, 0x19, 0xf3, 0xe9, 0x8c, 0xa7, 0xec, 0xed, 0x13, 0x88, 0xa6, 0x80, 0x85, - 0x81, 0x00, 0x7f, 0xc0, 0x1c, 0xef, 0x1e, 0x06, 0x55, 0xfe, 0x22, 0x41, 0x2e, 0xa0, 0x07, 0xba, - 0x00, 0xc0, 0xcc, 0x90, 0x03, 0x69, 0x96, 0x65, 0x90, 0x8d, 0xff, 0x9a, 0x5c, 0xab, 0xfc, 0x3f, - 0x4c, 0x46, 0x3a, 0x20, 0x62, 0x8e, 0x94, 0x22, 0xe6, 0xc8, 0xf9, 0x02, 0xe4, 0x02, 0x53, 0xf1, - 0x6a, 0x32, 0x13, 0x2f, 0x27, 0x6a, 0x0f, 0x20, 0x17, 0x98, 0x1b, 0xd0, 0x22, 0xe4, 0xc8, 0x9e, - 0x45, 0x73, 0x87, 0x85, 0x86, 0x0f, 0x7a, 0x11, 0x27, 0xd1, 0x56, 0x57, 0xe9, 0x29, 0x76, 0x73, - 0x40, 0x8a, 0x83, 0x6c, 0x27, 0x49, 0xe4, 0x5f, 0xc7, 0x61, 0x3c, 0x34, 0x78, 0xa0, 0xd7, 0x20, - 0xf5, 0x80, 0x36, 0x1a, 0x7f, 0xe7, 0xcb, 0xcf, 0x98, 0x56, 0x02, 0x9b, 0x0b, 0x26, 0x74, 0x0d, - 0x52, 0x9a, 0x6d, 0x7a, 0x96, 0x7f, 0xad, 0x99, 0x0a, 0xb3, 0x2f, 0x30, 0x1d, 0xb0, 0xa0, 0xa3, - 0x7d, 0x9b, 0x3d, 0x8d, 0x44, 0x10, 0x18, 0x88, 0x07, 0x70, 0x1a, 0x72, 0x4c, 0xb8, 0x20, 0x48, - 0x72, 0x02, 0x06, 0xe2, 0x04, 0x15, 0xc8, 0x3c, 0xd4, 0x0d, 0xd5, 0x7c, 0x48, 0x54, 0x96, 0xc9, - 0x19, 0x3c, 0x58, 0x53, 0x66, 0x4b, 0xb1, 0x5d, 0x5d, 0xe9, 0xc9, 0x8a, 0xa6, 0xb1, 0x06, 0x9b, - 0xc1, 0x20, 0x40, 0x0d, 0x4d, 0x43, 0x2f, 0x40, 0x79, 0x57, 
0x37, 0x94, 0x9e, 0xfe, 0x01, 0x91, - 0x6d, 0x96, 0xaf, 0x0e, 0xeb, 0xa7, 0x19, 0x5c, 0xf2, 0xe1, 0x3c, 0x8d, 0x9d, 0xda, 0x8f, 0x25, - 0x28, 0x8e, 0x0e, 0x48, 0x68, 0x1e, 0x60, 0xe8, 0x75, 0x71, 0xe9, 0x3b, 0x49, 0xac, 0x02, 0x5c, - 0x68, 0x0e, 0xd2, 0x3c, 0x2c, 0xc7, 0xfb, 0xcc, 0x27, 0xac, 0x7d, 0x28, 0x41, 0x61, 0x64, 0xd6, - 0x42, 0x13, 0x30, 0xc6, 0x66, 0x2d, 0xa6, 0x44, 0x02, 0xf3, 0xc5, 0x37, 0x91, 0x4d, 0x73, 0x59, - 0xd9, 0x31, 0x6d, 0x5e, 0xad, 0x8e, 0xdd, 0x75, 0xc4, 0xac, 0x5f, 0x18, 0x40, 0xb7, 0xec, 0xae, - 0x53, 0x7b, 0x2a, 0x41, 0x61, 0x64, 0x60, 0x0b, 0xe5, 0x9c, 0x14, 0x2e, 0xc6, 0xb7, 0xa0, 0x24, - 0x48, 0xfa, 0x8a, 0x65, 0xe9, 0x86, 0xe6, 0xeb, 0xf5, 0xd2, 0x31, 0xd3, 0xa0, 0xd0, 0x72, 0x9d, - 0x73, 0xe1, 0x62, 0x37, 0xb8, 0x74, 0xd0, 0x25, 0x28, 0x0e, 0xee, 0xec, 0x3b, 0x8a, 0xdb, 0xbd, - 0xc7, 0xbb, 0x2c, 0xce, 0xdb, 0xfc, 0xaa, 0x3e, 0x4f, 0x61, 0x95, 0x5b, 0x50, 0x18, 0x11, 0x43, - 0x4d, 0xf5, 0x67, 0x06, 0x43, 0x25, 0x7b, 0x42, 0xe7, 0x04, 0x2e, 0x88, 0xb1, 0x81, 0x03, 0x6b, - 0x9f, 0x26, 0x21, 0x1f, 0x9c, 0xd2, 0xd0, 0xab, 0x90, 0x0c, 0x5c, 0x47, 0xae, 0x3c, 0x7b, 0xa6, - 0x63, 0x0b, 0xd6, 0x53, 0x18, 0x13, 0x52, 0xe0, 0x14, 0x79, 0xdf, 0x53, 0x7a, 0xba, 0xbb, 0x2f, - 0x77, 0x4d, 0x43, 0xd5, 0x79, 0x0f, 0xe6, 0x7e, 0xb8, 0x76, 0x8c, 0xac, 0xa6, 0xe0, 0x5c, 0xf0, - 0x19, 0x31, 0x22, 0x87, 0x41, 0x0e, 0xc2, 0x50, 0x14, 0x47, 0x87, 0x1f, 0x7d, 0x7e, 0xd3, 0xfc, - 0xdf, 0x63, 0xa4, 0xf3, 0xfb, 0x9e, 0x48, 0x88, 0x02, 0x17, 0xb1, 0x20, 0xd2, 0xe2, 0x70, 0x74, - 0x93, 0xe1, 0xe8, 0x86, 0xa3, 0x30, 0x16, 0x11, 0x85, 0x3e, 0x8c, 0x87, 0xac, 0x40, 0x57, 0x61, - 0xbc, 0x47, 0x76, 0x7d, 0x7d, 0x79, 0x38, 0xc4, 0xdd, 0xb1, 0x44, 0x11, 0x0b, 0xc3, 0x80, 0xa0, - 0x17, 0x01, 0xd9, 0xba, 0x76, 0xef, 0x10, 0x71, 0x9c, 0x11, 0x97, 0x19, 0x26, 0x40, 0x5d, 0xe9, - 0x40, 0x3e, 0x68, 0x16, 0xb5, 0x83, 0xdf, 0x75, 0x47, 0x36, 0xc9, 0x71, 0x18, 0xdf, 0x60, 0x68, - 0x6a, 0x50, 0x74, 0x2e, 0x90, 0x14, 0xb5, 0x97, 0x21, 0xe3, 0x87, 0x15, 0x65, 0x61, 0xac, 0xb5, - 
0xb1, 0xd1, 0xc4, 0xe5, 0x18, 0x2a, 0x02, 0xac, 0x35, 0x97, 0x3a, 0x72, 0x7b, 0xbb, 0xd3, 0xc4, - 0x65, 0x89, 0xae, 0x97, 0xb6, 0xd7, 0xd6, 0xc4, 0x3a, 0x51, 0xdb, 0x05, 0x14, 0x1e, 0xd6, 0x23, - 0x87, 0xaf, 0xbb, 0x00, 0x8a, 0xad, 0xc9, 0xa2, 0x17, 0xc7, 0x8f, 0xba, 0xee, 0xf3, 0xce, 0x22, - 0xa6, 0x4a, 0xc5, 0xd6, 0xd8, 0x93, 0x53, 0x33, 0xe1, 0x54, 0xc4, 0x14, 0x7f, 0x92, 0x0a, 0xfd, - 0x66, 0x07, 0x71, 0xed, 0x57, 0x71, 0x48, 0xd3, 0x69, 0x7e, 0xcd, 0xd4, 0xd0, 0xeb, 0x00, 0x8a, - 0xeb, 0xda, 0xfa, 0x8e, 0xe7, 0x0e, 0x8e, 0x91, 0xe9, 0xe8, 0x8b, 0x41, 0xc3, 0xa7, 0xc3, 0x01, - 0x16, 0x9a, 0x0c, 0x74, 0x1c, 0x0e, 0xc7, 0x37, 0x81, 0x4b, 0x14, 0x11, 0x4c, 0x86, 0x57, 0xa1, - 0x62, 0xee, 0x38, 0xc4, 0x7e, 0x40, 0x54, 0x39, 0xcc, 0x94, 0x60, 0x4c, 0x67, 0x7c, 0x8a, 0xce, - 0x21, 0xe6, 0x2b, 0x50, 0x72, 0xc8, 0x03, 0x62, 0xd3, 0x52, 0x34, 0xbc, 0xfe, 0x0e, 0xb1, 0xc5, - 0xbb, 0xbe, 0xa2, 0x0f, 0xde, 0x60, 0x50, 0xf4, 0x3c, 0x14, 0x06, 0x84, 0x2e, 0xd9, 0x73, 0x59, - 0x62, 0x67, 0x71, 0xde, 0x07, 0x76, 0xc8, 0x9e, 0x4b, 0xd5, 0xde, 0x31, 0xd5, 0xfd, 0x51, 0x0d, - 0x52, 0x5c, 0x6d, 0x8a, 0x08, 0xec, 0x5c, 0xfb, 0x28, 0x09, 0x19, 0x76, 0xfb, 0xb1, 0x14, 0x9a, - 0x92, 0x39, 0x1a, 0x0f, 0xd9, 0x71, 0x6d, 0x3a, 0xb3, 0xb3, 0x34, 0xa0, 0x17, 0x22, 0x0a, 0xdc, - 0x62, 0x30, 0xf4, 0x22, 0x8c, 0x33, 0x92, 0xb0, 0x4b, 0x56, 0x62, 0xb8, 0x44, 0x51, 0x41, 0xbb, - 0x46, 0x23, 0x90, 0xf8, 0xfa, 0x11, 0x58, 0x84, 0x49, 0xd7, 0x56, 0xd8, 0xbc, 0x3a, 0xba, 0x25, - 0x73, 0xcf, 0xfc, 0xf8, 0xc1, 0xe3, 0xe9, 0x42, 0x87, 0x12, 0xb4, 0x16, 0x45, 0xb7, 0x40, 0x8c, - 0xbe, 0xa5, 0x06, 0xd5, 0x68, 0xc0, 0x84, 0x63, 0x29, 0x46, 0x48, 0xc8, 0x18, 0x13, 0xc2, 0x26, - 0x60, 0x6a, 0xff, 0x40, 0xc6, 0x38, 0xa5, 0x1e, 0x15, 0xd1, 0x81, 0x73, 0xa2, 0x5a, 0x23, 0x25, - 0x31, 0xef, 0xce, 0x9f, 0x3e, 0x78, 0x3c, 0x8d, 0x78, 0x91, 0x8f, 0xc8, 0x3b, 0x63, 0x0d, 0x61, - 0x23, 0x52, 0x5f, 0x86, 0x33, 0xc3, 0x0b, 0xdb, 0xa8, 0xc4, 0x34, 0x8b, 0xd7, 0xc4, 0xe0, 0x82, - 0x16, 0x64, 0xbb, 0x0e, 0x93, 0xc4, 
0x88, 0x4a, 0xb3, 0x0c, 0x63, 0x42, 0xc4, 0x08, 0x65, 0xd8, - 0x05, 0x80, 0xfb, 0xba, 0xa1, 0xf2, 0x3a, 0x66, 0x6f, 0x2d, 0x12, 0x38, 0x4b, 0x21, 0xac, 0x50, - 0xe7, 0x53, 0xbc, 0xf2, 0x6b, 0xdf, 0x87, 0x12, 0x0d, 0xc6, 0x3a, 0x71, 0x6d, 0xbd, 0xbb, 0xac, - 0x78, 0x1a, 0x41, 0x75, 0x40, 0xbb, 0x3d, 0x53, 0x89, 0x68, 0x89, 0x34, 0xe4, 0x65, 0x86, 0x0b, - 0xee, 0x74, 0x15, 0xca, 0xba, 0xe1, 0x46, 0x27, 0x48, 0x51, 0x37, 0x82, 0xb4, 0xf3, 0x45, 0xc8, - 0xf3, 0x91, 0x8a, 0x53, 0xd7, 0x7e, 0x19, 0x87, 0xf1, 0xe1, 0xfe, 0x5b, 0x5e, 0xbf, 0xaf, 0xd8, - 0xfb, 0xb4, 0xcf, 0x76, 0x4d, 0xcf, 0x88, 0xd2, 0x00, 0x97, 0x19, 0x26, 0xb8, 0xff, 0x0c, 0x94, - 0x1d, 0xaf, 0x1f, 0x55, 0xb3, 0x45, 0xc7, 0xeb, 0x07, 0x29, 0xdf, 0x85, 0xd2, 0xfb, 0x1e, 0x9d, - 0xaa, 0x7b, 0xc4, 0xef, 0x6f, 0x3c, 0x45, 0x6f, 0x44, 0xa7, 0xe8, 0x88, 0x56, 0x75, 0xe6, 0xb8, - 0x86, 0xfb, 0x2d, 0x21, 0x01, 0x17, 0x7d, 0x59, 0xbc, 0xf5, 0x55, 0xbe, 0x0b, 0xa5, 0x43, 0x24, - 0x74, 0x40, 0xf4, 0x89, 0x98, 0xfa, 0x12, 0x1e, 0xac, 0xa9, 0x91, 0x41, 0x57, 0x8c, 0x28, 0x5e, - 0x66, 0x98, 0x60, 0xd9, 0x7e, 0x12, 0x87, 0xc2, 0x48, 0xd5, 0x44, 0xf6, 0xee, 0x37, 0x20, 0xc5, - 0xa5, 0x1d, 0xfd, 0xc2, 0x71, 0x44, 0x88, 0x18, 0x6e, 0x56, 0x62, 0x58, 0xf0, 0xa1, 0xe7, 0x21, - 0xcf, 0x9b, 0x81, 0x48, 0x9c, 0x84, 0x68, 0x09, 0x39, 0x0e, 0x65, 0x06, 0x56, 0x7e, 0x2e, 0x41, - 0x4a, 0x1c, 0x6a, 0x37, 0x06, 0x2f, 0x3f, 0x02, 0x73, 0x49, 0x54, 0xd3, 0x86, 0x61, 0xd3, 0x8e, - 0x3c, 0xe6, 0x12, 0x23, 0xc7, 0x1c, 0xba, 0x0d, 0x67, 0xbb, 0x8a, 0x21, 0xef, 0x10, 0xf9, 0x3d, - 0xc7, 0x34, 0x64, 0x62, 0x74, 0x4d, 0x95, 0xa8, 0xb2, 0x62, 0xdb, 0xca, 0xbe, 0xf8, 0x84, 0x32, - 0xd9, 0x55, 0x8c, 0x79, 0xb2, 0xea, 0x98, 0x46, 0x93, 0x63, 0x1b, 0x14, 0x39, 0x9f, 0x86, 0x31, - 0xa6, 0x7a, 0xed, 0xd3, 0x38, 0xc0, 0x30, 0x8a, 0x91, 0xfe, 0xba, 0xc8, 0xae, 0x45, 0x5d, 0x5b, - 0x67, 0xb7, 0x29, 0xf1, 0x0a, 0x3e, 0x08, 0xa2, 0x5c, 0x9e, 0xa1, 0xbb, 0xdc, 0x0f, 0x98, 0x3d, - 0x1f, 0x6a, 0x72, 0xc9, 0x7f, 0xd3, 0x31, 0x33, 0x16, 0x7d, 0xcc, 0xbc, 
0x02, 0x63, 0x1a, 0x2d, - 0xcb, 0x29, 0xc2, 0x22, 0xfa, 0xdc, 0xb3, 0x32, 0x95, 0xd5, 0xef, 0x4a, 0x0c, 0x73, 0x0e, 0xf4, - 0x3a, 0xa4, 0x1d, 0x9e, 0xbb, 0x53, 0xbb, 0x47, 0xbd, 0x00, 0x0e, 0xa5, 0xf9, 0x4a, 0x0c, 0xfb, - 0x5c, 0xb4, 0x49, 0xa8, 0x8a, 0xab, 0xd4, 0xfe, 0x26, 0x01, 0x62, 0x6f, 0xd3, 0x0c, 0xd5, 0x32, - 0x59, 0x45, 0x1b, 0xbb, 0xba, 0x86, 0xce, 0x42, 0xc2, 0xb3, 0x7b, 0xdc, 0xa1, 0xf3, 0xe9, 0x83, - 0xc7, 0xd3, 0x89, 0x6d, 0xbc, 0x86, 0x29, 0x0c, 0xbd, 0x09, 0xe9, 0x7b, 0x44, 0x51, 0x89, 0xed, - 0x4f, 0x10, 0xd7, 0x8f, 0x78, 0x3f, 0x37, 0x22, 0xb1, 0xbe, 0xc2, 0x79, 0x9a, 0x86, 0x6b, 0xef, - 0x63, 0x5f, 0x02, 0xad, 0x22, 0xdd, 0x70, 0x48, 0xd7, 0xb3, 0xfd, 0xaf, 0x67, 0x83, 0x35, 0x9a, - 0x82, 0x34, 0xf5, 0x98, 0xe9, 0xb9, 0xe2, 0x00, 0xf5, 0x97, 0x95, 0x3b, 0x90, 0x0f, 0x8a, 0x43, - 0x65, 0x48, 0xdc, 0x27, 0xfb, 0x22, 0xfc, 0xf4, 0x91, 0xde, 0x5c, 0x78, 0x92, 0xf3, 0xb8, 0xf3, - 0xc5, 0x9d, 0xf8, 0x6d, 0xa9, 0xd6, 0x86, 0x3c, 0xd5, 0x0e, 0x13, 0xfe, 0xf2, 0xe4, 0x5f, 0x1e, - 0x2c, 0x6a, 0xbf, 0x8d, 0xc3, 0xe9, 0xe8, 0xf7, 0x91, 0x68, 0x1d, 0x4a, 0x44, 0x78, 0x81, 0x4e, - 0xe5, 0xbb, 0xba, 0xff, 0x0d, 0xef, 0xd2, 0x49, 0x5c, 0x86, 0x8b, 0x64, 0x34, 0x28, 0x77, 0x20, - 0x63, 0x0b, 0xb5, 0x45, 0x13, 0xa8, 0x46, 0xcb, 0xf1, 0x8d, 0xc3, 0x03, 0x7a, 0x74, 0x0b, 0xd2, - 0x7d, 0x96, 0x0b, 0x7e, 0x5f, 0x3c, 0xff, 0xac, 0x84, 0xc1, 0x3e, 0x31, 0xba, 0x06, 0x63, 0xf4, - 0x90, 0xf4, 0x6b, 0xa1, 0x12, 0xcd, 0x45, 0x4f, 0x43, 0xcc, 0x09, 0xd1, 0x4b, 0x90, 0xec, 0x99, - 0x9a, 0xff, 0xf5, 0xef, 0x6c, 0x34, 0xc3, 0x9a, 0xa9, 0x61, 0x46, 0x56, 0xfb, 0x9d, 0x04, 0xe5, - 0xc3, 0x57, 0x59, 0xf4, 0x2a, 0x64, 0xba, 0xa6, 0xe1, 0xb8, 0x8a, 0xe1, 0x0a, 0x8f, 0x3d, 0x7b, - 0x4c, 0x5d, 0x89, 0xe1, 0x01, 0x03, 0x9a, 0x3b, 0xd4, 0x29, 0x8f, 0xbc, 0x9e, 0x06, 0x7a, 0xe3, - 0x1c, 0x24, 0x77, 0x3d, 0xa3, 0x2b, 0xbe, 0xc2, 0x9c, 0x3f, 0x6a, 0xb3, 0x25, 0xcf, 0xe8, 0xae, - 0xc4, 0x30, 0xa3, 0x1d, 0x76, 0xa3, 0xdf, 0xc7, 0x21, 0x17, 0x50, 0x06, 0xcd, 0x42, 0x96, 0xd6, - 0xd6, 0x71, 
0x6d, 0x33, 0xa3, 0x8a, 0x27, 0x34, 0x0d, 0xb0, 0x63, 0x9a, 0x3d, 0x79, 0x98, 0xb2, - 0x99, 0x95, 0x18, 0xce, 0x52, 0x18, 0x97, 0xf8, 0x1c, 0xe4, 0x74, 0xc3, 0xbd, 0x75, 0x33, 0xd0, - 0xb9, 0xe9, 0x11, 0x0c, 0xfa, 0xe0, 0x1d, 0x2e, 0xba, 0x0c, 0x05, 0x76, 0x7c, 0x0f, 0x88, 0x68, - 0xcd, 0x48, 0x2b, 0x31, 0x9c, 0x17, 0x60, 0x4e, 0x76, 0xf8, 0x10, 0x18, 0x8b, 0x38, 0x04, 0xd0, - 0x0c, 0xb0, 0x5e, 0x75, 0xeb, 0xa6, 0x6c, 0x38, 0x82, 0x2e, 0x25, 0xb6, 0x2c, 0x70, 0xc4, 0x86, - 0xc3, 0x29, 0x6f, 0x43, 0xc1, 0xd3, 0x0d, 0xf7, 0xfa, 0xdc, 0x6d, 0x41, 0xc7, 0x3f, 0x72, 0x8c, - 0x0f, 0xcd, 0xdd, 0x6e, 0x31, 0x34, 0xfb, 0x78, 0xc0, 0x29, 0xf9, 0x94, 0xe2, 0x7b, 0x6f, 0x35, - 0x99, 0xc9, 0x94, 0xb3, 0xb5, 0x2f, 0x24, 0x80, 0xa1, 0x8f, 0x23, 0x3b, 0xfa, 0x1d, 0xc8, 0xea, - 0x86, 0xee, 0xca, 0x8a, 0xad, 0x9d, 0xf0, 0xf2, 0x92, 0xa1, 0xf4, 0x0d, 0x5b, 0x73, 0xd0, 0x2d, - 0x48, 0x32, 0xb6, 0xc4, 0x89, 0xdf, 0x7c, 0x31, 0x7a, 0xf1, 0xbd, 0x91, 0xb7, 0x9f, 0xb8, 0xae, - 0xa2, 0x3b, 0x50, 0xa2, 0x70, 0x79, 0x10, 0x5f, 0x9e, 0xe7, 0xd1, 0x01, 0x2e, 0x50, 0x52, 0x7f, - 0xe5, 0xd4, 0xfe, 0x1e, 0x87, 0x53, 0x11, 0xaf, 0xb9, 0x06, 0xb6, 0x26, 0x8e, 0xb2, 0x35, 0xf9, - 0xf5, 0x6c, 0x7d, 0x4d, 0xd8, 0xca, 0x0b, 0xf0, 0x85, 0x13, 0xbd, 0x6b, 0xab, 0x37, 0x6c, 0x6d, - 0xc4, 0xe4, 0xd4, 0xb3, 0x4c, 0x4e, 0x9f, 0xd0, 0xe4, 0xca, 0x0f, 0x20, 0xd1, 0xb0, 0xb5, 0xff, - 0x78, 0x39, 0x0f, 0x4b, 0x73, 0x6e, 0x30, 0xcd, 0x50, 0x2f, 0x9b, 0x2a, 0x11, 0x57, 0x73, 0xf6, - 0x4c, 0x4f, 0x89, 0xe0, 0x65, 0x9c, 0x2f, 0xae, 0xfe, 0x35, 0x0e, 0xf9, 0xe0, 0xa7, 0x5f, 0x74, - 0x16, 0x26, 0xdb, 0x9b, 0x4d, 0xdc, 0xe8, 0xb4, 0xb1, 0xdc, 0x79, 0x67, 0xb3, 0x29, 0x6f, 0x6f, - 0xbc, 0xb9, 0xd1, 0x7e, 0x7b, 0xa3, 0x1c, 0x43, 0xe7, 0xe0, 0xf4, 0x7a, 0x73, 0xbd, 0x8d, 0xdf, - 0x91, 0xb7, 0xda, 0xdb, 0x78, 0xa1, 0x29, 0xfb, 0x84, 0xe5, 0xa7, 0x69, 0x74, 0x16, 0x26, 0x96, - 0xf1, 0xe6, 0x42, 0x08, 0xf5, 0xa7, 0x0c, 0x45, 0xd1, 0x3b, 0x7b, 0x08, 0xf5, 0x49, 0x16, 0x55, - 0x60, 0xb2, 0xb9, 0xbe, 0xd9, 0x09, 0x4b, 0xfc, 
0x29, 0xa0, 0x71, 0xc8, 0xaf, 0x37, 0x36, 0x87, - 0xa0, 0x47, 0x25, 0x74, 0x06, 0x50, 0x63, 0x79, 0x19, 0x37, 0x97, 0x1b, 0x9d, 0x00, 0xed, 0x6f, - 0xca, 0x68, 0x02, 0x4a, 0x4b, 0xad, 0xb5, 0x4e, 0x13, 0x0f, 0xa1, 0x3f, 0x1b, 0x47, 0xa7, 0xa0, - 0xb8, 0xd6, 0x5a, 0x6f, 0x75, 0x86, 0xc0, 0x7f, 0x30, 0xe0, 0xf6, 0x46, 0xab, 0xbd, 0x31, 0x04, - 0x7e, 0x81, 0x10, 0x82, 0xc2, 0x6a, 0xbb, 0x15, 0x80, 0xfd, 0xe1, 0x14, 0x55, 0xdb, 0x37, 0xb7, - 0xb5, 0xf1, 0xe6, 0x10, 0xf5, 0xf1, 0x12, 0xd5, 0x83, 0x1b, 0x3b, 0x82, 0xf8, 0x68, 0x19, 0x55, - 0xe1, 0x6c, 0xbb, 0xd3, 0x5c, 0x93, 0x9b, 0xdf, 0xde, 0x6c, 0xe3, 0xce, 0x21, 0xfc, 0x57, 0xcb, - 0xf3, 0x77, 0x1f, 0x3d, 0xa9, 0xc6, 0x3e, 0x7f, 0x52, 0x8d, 0x7d, 0xf5, 0xa4, 0x2a, 0x7d, 0x78, - 0x50, 0x95, 0x3e, 0x3e, 0xa8, 0x4a, 0x7f, 0x3c, 0xa8, 0x4a, 0x8f, 0x0e, 0xaa, 0xd2, 0x17, 0x07, - 0x55, 0xe9, 0xe9, 0x41, 0x35, 0xf6, 0xd5, 0x41, 0x55, 0xfa, 0xc9, 0x97, 0xd5, 0xd8, 0xa3, 0x2f, - 0xab, 0xb1, 0xcf, 0xbf, 0xac, 0xc6, 0xbe, 0x93, 0xe2, 0xa1, 0xdf, 0x49, 0xb1, 0xef, 0x59, 0x37, - 0xfe, 0x19, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xa5, 0x24, 0xbc, 0x5d, 0x24, 0x00, 0x00, + // 3333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4d, 0x6c, 0x1b, 0x47, + 0x96, 0x66, 0x93, 0x14, 0x7f, 0x1e, 0x7f, 0x55, 0x96, 0x6c, 0x99, 0xb6, 0x29, 0x87, 0xb1, 0xd7, + 0x8a, 0x37, 0xa1, 0x6c, 0xd9, 0xf1, 0x3a, 0x8e, 0xb3, 0x09, 0x25, 0x51, 0x12, 0x15, 0x49, 0xd4, + 0x96, 0xa8, 0x64, 0xb3, 0x1b, 0x6c, 0xa3, 0xc5, 0x2e, 0xb5, 0x3b, 0x26, 0xbb, 0x3b, 0xfd, 0x63, + 0x4b, 0x01, 0x16, 0x9b, 0xdd, 0xd3, 0x1e, 0x72, 0xd8, 0xc3, 0x1e, 0x16, 0x7b, 0xdf, 0x45, 0x2e, + 0x33, 0xc8, 0x61, 0x8e, 0x73, 0x98, 0x01, 0x06, 0xc8, 0x1c, 0x06, 0x81, 0x67, 0x4e, 0x39, 0x19, + 0xb1, 0x72, 0xf1, 0x61, 0x30, 0xc8, 0xdc, 0xe7, 0x30, 0xa8, 0x9f, 0x26, 0x9b, 0xea, 0xa6, 0xa5, + 0x64, 0x06, 0x03, 0xcc, 0xc1, 0x16, 0xeb, 0xd5, 0xf7, 0x5e, 0xbd, 0xbf, 0x7a, 0xf5, 0xaa, 0x1a, + 0x2e, 0x3a, 0x76, 0x77, 0xbe, 0xab, 0xd8, 0x86, 
0xe9, 0xce, 0x5b, 0x3d, 0xc5, 0xb0, 0xf6, 0xd8, + 0x9f, 0xba, 0x65, 0x9b, 0xae, 0x89, 0xca, 0xd6, 0x41, 0x9d, 0x4f, 0xd6, 0xf9, 0x64, 0x65, 0x4a, + 0x33, 0x35, 0x93, 0x4d, 0xce, 0xd3, 0x5f, 0x1c, 0x57, 0xa9, 0x6a, 0xa6, 0xa9, 0xf5, 0xc8, 0x3c, + 0x1b, 0xed, 0x79, 0xfb, 0xf3, 0x8f, 0x6d, 0xc5, 0xb2, 0x88, 0xed, 0x88, 0xf9, 0x59, 0xba, 0x8a, + 0x62, 0xe9, 0x1c, 0x30, 0xef, 0x79, 0xba, 0x6a, 0xed, 0xb1, 0x3f, 0x02, 0x70, 0x85, 0x02, 0x9c, + 0x07, 0x8a, 0x4d, 0xd4, 0x79, 0xf7, 0xd0, 0x22, 0x0e, 0xff, 0xdf, 0xda, 0xe3, 0x7f, 0x39, 0xaa, + 0xf6, 0xef, 0x12, 0xe4, 0xb6, 0x7b, 0x8a, 0xd1, 0xb6, 0x5c, 0xdd, 0x34, 0x1c, 0x34, 0x03, 0x69, + 0x72, 0x60, 0xf5, 0x14, 0xdd, 0x98, 0x89, 0x5f, 0x96, 0xe6, 0x32, 0xd8, 0x1f, 0xd2, 0x19, 0xc5, + 0x50, 0x7a, 0x87, 0x9f, 0x90, 0x99, 0x04, 0x9f, 0x11, 0x43, 0x74, 0x17, 0xce, 0xf7, 0x95, 0x03, + 0xd9, 0xf4, 0x5c, 0xcb, 0x73, 0x65, 0xdb, 0x7c, 0xec, 0xc8, 0x16, 0xb1, 0x65, 0x57, 0xd9, 0xeb, + 0x91, 0x99, 0xe4, 0x65, 0x69, 0x2e, 0x81, 0xa7, 0xfb, 0xca, 0x41, 0x9b, 0xcd, 0x63, 0xf3, 0xb1, + 0xb3, 0x4d, 0xec, 0x0e, 0x9d, 0x5c, 0x4f, 0x66, 0xa4, 0x72, 0xbc, 0xf6, 0x2c, 0x01, 0x49, 0xaa, + 0x03, 0xba, 0x06, 0x09, 0x55, 0xd1, 0x66, 0xa4, 0xcb, 0xd2, 0x5c, 0x6e, 0x61, 0xba, 0x7e, 0xdc, + 0x53, 0xf5, 0xe5, 0xc6, 0x2a, 0xa6, 0x08, 0x74, 0x1b, 0x26, 0x0c, 0x53, 0x25, 0xce, 0x4c, 0xfc, + 0x72, 0x62, 0x2e, 0xb7, 0x50, 0x0d, 0x43, 0xa9, 0xbc, 0x15, 0x5b, 0xd1, 0xfa, 0xc4, 0x70, 0x31, + 0x07, 0xa3, 0x77, 0x20, 0x4f, 0x67, 0x65, 0x93, 0xdb, 0xca, 0x54, 0xcb, 0x2d, 0x5c, 0x8a, 0x66, + 0x16, 0x0e, 0xc1, 0x39, 0x2b, 0xe0, 0x9d, 0x1d, 0x40, 0xba, 0xd1, 0x35, 0xfb, 0xba, 0xa1, 0xc9, + 0x8a, 0x46, 0x0c, 0x57, 0xd6, 0x55, 0x67, 0x66, 0x82, 0x29, 0x51, 0xa2, 0x72, 0x78, 0x18, 0xea, + 0xbb, 0xbb, 0xad, 0xe5, 0xc5, 0xa9, 0xa3, 0xa7, 0xb3, 0xe5, 0x96, 0x80, 0x37, 0x28, 0xba, 0xb5, + 0xec, 0xe0, 0xb2, 0x3e, 0x42, 0x51, 0x1d, 0xe4, 0xc1, 0x25, 0x72, 0x40, 0xba, 0x1e, 0x5d, 0x42, + 0x76, 0x5c, 0xc5, 0xf5, 0x1c, 0x59, 0x25, 0x8e, 0xab, 0x1b, 0x0a, 0xd7, 0x33, 0xc5, 
0xe4, 0xdf, + 0x8c, 0xd6, 0xb3, 0xde, 0xf4, 0x79, 0x77, 0x18, 0xeb, 0xf2, 0x90, 0x13, 0x5f, 0x20, 0x63, 0xe7, + 0x9c, 0xca, 0x3e, 0x54, 0xc6, 0xb3, 0xa2, 0x97, 0x20, 0xaf, 0xd9, 0x56, 0x57, 0x56, 0x54, 0xd5, + 0x26, 0x8e, 0xc3, 0x62, 0x92, 0xc5, 0x39, 0x4a, 0x6b, 0x70, 0x12, 0xba, 0x0a, 0x45, 0xc7, 0xe9, + 0xc9, 0xae, 0x62, 0x6b, 0xc4, 0x35, 0x94, 0x3e, 0x61, 0x19, 0x93, 0xc5, 0x05, 0xc7, 0xe9, 0x75, + 0x06, 0xc4, 0xf5, 0x64, 0x26, 0x51, 0x4e, 0xd6, 0x0e, 0x21, 0x1f, 0x0c, 0x09, 0x2a, 0x42, 0x5c, + 0x57, 0x99, 0xd4, 0x24, 0x8e, 0xeb, 0xaa, 0x1f, 0xfa, 0xf8, 0x89, 0xa1, 0xbf, 0xe1, 0x87, 0x3e, + 0xc1, 0xbc, 0x52, 0x89, 0xf6, 0xca, 0x96, 0xa9, 0x12, 0x11, 0xf6, 0xda, 0xff, 0x49, 0x90, 0x58, + 0x6e, 0xac, 0xa2, 0x5b, 0x3e, 0xa7, 0xc4, 0x38, 0x2f, 0x45, 0x2e, 0x42, 0xff, 0x05, 0x98, 0x2b, + 0x3a, 0xa4, 0x05, 0x25, 0xa4, 0x32, 0xb5, 0xdf, 0xb4, 0x5d, 0xa2, 0xca, 0x96, 0x62, 0x13, 0xc3, + 0xa5, 0x09, 0x95, 0x98, 0x4b, 0xe2, 0x02, 0xa7, 0x6e, 0x73, 0x22, 0xba, 0x06, 0x25, 0x01, 0xeb, + 0x3e, 0xd0, 0x7b, 0xaa, 0x4d, 0x0c, 0xa6, 0x7a, 0x12, 0x0b, 0xee, 0x25, 0x41, 0xad, 0xad, 0x40, + 0xc6, 0x57, 0x3d, 0xb4, 0xd6, 0x75, 0x88, 0x9b, 0x96, 0xf0, 0x4e, 0x84, 0xc9, 0x6d, 0x8b, 0xd8, + 0x8a, 0x6b, 0xda, 0x38, 0x6e, 0x5a, 0xb5, 0xff, 0xc8, 0x42, 0xc6, 0x27, 0xa0, 0xbf, 0x83, 0xb4, + 0x69, 0xc9, 0x74, 0xc7, 0x33, 0x69, 0xc5, 0xa8, 0xbd, 0xe2, 0x83, 0x3b, 0x87, 0x16, 0xc1, 0x29, + 0xd3, 0xa2, 0x7f, 0xd1, 0x06, 0x14, 0xfa, 0xa4, 0x2f, 0x3b, 0xa6, 0x67, 0x77, 0x89, 0x3c, 0x58, + 0xfc, 0x6f, 0xc2, 0xec, 0x9b, 0xa4, 0x6f, 0xda, 0x87, 0x3b, 0x0c, 0xe8, 0x8b, 0x5a, 0x8b, 0xe1, + 0x5c, 0x9f, 0xf4, 0x7d, 0x22, 0xba, 0x03, 0xa9, 0xbe, 0x62, 0x51, 0x31, 0x89, 0x71, 0x9b, 0x6e, + 0x53, 0xb1, 0x02, 0xdc, 0x13, 0x7d, 0x3a, 0x44, 0xf7, 0x21, 0xa5, 0x68, 0x1a, 0xe5, 0xe3, 0x9b, + 0xf5, 0xe5, 0x30, 0x5f, 0x43, 0xd3, 0x6c, 0xa2, 0x29, 0x6e, 0x70, 0xed, 0x09, 0x45, 0xd3, 0xda, + 0x16, 0x5a, 0x81, 0x1c, 0xb3, 0x41, 0x37, 0x1e, 0x52, 0x11, 0x13, 0x4c, 0xc4, 0x95, 0xb1, 0x16, + 0xe8, 0xc6, 0xc3, 0x80, 
0x8c, 0x2c, 0xd5, 0x9f, 0x91, 0xd0, 0xdb, 0x90, 0xdd, 0xd7, 0x7b, 0x2e, + 0xb1, 0xa9, 0x94, 0x14, 0x93, 0x72, 0x39, 0x2c, 0x65, 0x85, 0x41, 0x02, 0x12, 0x32, 0xfb, 0x82, + 0x82, 0xee, 0x43, 0xa6, 0xa7, 0xf7, 0x75, 0x97, 0xf2, 0xa7, 0x19, 0xff, 0x6c, 0x98, 0x7f, 0x83, + 0x22, 0x02, 0xec, 0xe9, 0x1e, 0x27, 0x50, 0x6e, 0xcf, 0xa0, 0xc5, 0xc1, 0xb4, 0x66, 0x32, 0xe3, + 0xb8, 0x77, 0x29, 0x22, 0xc8, 0xed, 0x71, 0x02, 0xfa, 0x17, 0x28, 0xb2, 0x9d, 0x3c, 0x8c, 0x64, + 0x76, 0x9c, 0x1f, 0x56, 0xf1, 0xf6, 0xd2, 0x68, 0x1c, 0x17, 0xcb, 0x47, 0x4f, 0x67, 0xf3, 0x41, + 0xfa, 0x5a, 0x0c, 0xb3, 0xca, 0x30, 0x08, 0xed, 0xfb, 0xa2, 0x52, 0xf8, 0x5e, 0x7e, 0xce, 0x0d, + 0xac, 0x8d, 0x11, 0x1f, 0x70, 0xf2, 0x62, 0xf1, 0xe8, 0xe9, 0x2c, 0x0c, 0xa9, 0x6b, 0x31, 0x0c, + 0x4c, 0x34, 0xf7, 0xfa, 0x1b, 0x90, 0xfe, 0xc8, 0xd4, 0x99, 0xd5, 0x39, 0x26, 0x32, 0x22, 0x75, + 0xd7, 0x4d, 0x3d, 0x68, 0x74, 0xea, 0x23, 0x36, 0x46, 0x1b, 0x50, 0xf4, 0x54, 0x77, 0x3f, 0x60, + 0x73, 0x7e, 0x9c, 0xcd, 0xbb, 0xcb, 0x9d, 0x95, 0x50, 0xee, 0xe6, 0x29, 0xf7, 0xc0, 0xc2, 0x36, + 0x94, 0x48, 0xdf, 0x72, 0x0f, 0x03, 0xe2, 0x0a, 0x4c, 0xdc, 0xd5, 0xb0, 0xb8, 0x26, 0x05, 0x86, + 0xe4, 0x15, 0x48, 0x90, 0x8c, 0x3e, 0x84, 0xbc, 0xe9, 0x92, 0xde, 0xc0, 0x65, 0x45, 0x26, 0x6d, + 0x2e, 0x62, 0x67, 0x76, 0x48, 0xaf, 0x79, 0x60, 0x99, 0xb6, 0x1b, 0xf6, 0x1b, 0x9d, 0x1b, 0xfa, + 0x8d, 0xca, 0x13, 0x7e, 0x6b, 0x40, 0xba, 0x6b, 0x1a, 0x2e, 0x39, 0x70, 0x67, 0x4a, 0xac, 0xd2, + 0x5d, 0x1b, 0xbf, 0xe5, 0xeb, 0x4b, 0x1c, 0xd9, 0x34, 0x5c, 0xfb, 0x10, 0xfb, 0x7c, 0x95, 0x7b, + 0x90, 0x0f, 0x4e, 0xa0, 0x32, 0x24, 0x1e, 0x92, 0x43, 0x71, 0x08, 0xd0, 0x9f, 0x68, 0x0a, 0x26, + 0x1e, 0x29, 0x3d, 0xcf, 0xaf, 0xf9, 0x7c, 0x70, 0x2f, 0x7e, 0x57, 0x5a, 0x4c, 0xd2, 0x52, 0x55, + 0xfb, 0x75, 0x1c, 0xa6, 0xa2, 0x0a, 0x03, 0x42, 0x90, 0x64, 0x67, 0x05, 0x97, 0xc5, 0x7e, 0xa3, + 0x59, 0xc8, 0x75, 0xcd, 0x9e, 0xd7, 0x37, 0x64, 0x5d, 0x3d, 0xe0, 0x87, 0x7a, 0x02, 0x03, 0x27, + 0xb5, 0xd4, 0x03, 0x87, 0x9e, 0x46, 0x02, 0x40, 0xf1, 0xbc, 
0xf6, 0x67, 0xb1, 0x60, 0xda, 0xa2, + 0x24, 0xf4, 0xfa, 0x00, 0xc2, 0xda, 0x1b, 0x56, 0x8b, 0x8b, 0x0b, 0x88, 0x9a, 0xce, 0xfb, 0x9d, + 0x65, 0xc5, 0x55, 0x58, 0x85, 0x13, 0x6c, 0xf4, 0xb7, 0x83, 0xee, 0x01, 0x38, 0xae, 0x62, 0xbb, + 0xb2, 0xab, 0xf7, 0x89, 0xa8, 0x10, 0x17, 0xea, 0xbc, 0xf7, 0xaa, 0xfb, 0xbd, 0x57, 0xbd, 0x65, + 0xb8, 0x77, 0x6e, 0xbf, 0x47, 0x4d, 0xc4, 0x59, 0x06, 0xef, 0xe8, 0x7d, 0xda, 0xf7, 0x64, 0x1d, + 0x97, 0x56, 0x57, 0xca, 0x9a, 0x3a, 0x99, 0x35, 0x43, 0xd1, 0x8c, 0xf3, 0x2c, 0xa4, 0x58, 0x77, + 0xe4, 0xb2, 0x6a, 0x90, 0xc5, 0x62, 0x84, 0x2e, 0x52, 0x89, 0x36, 0x51, 0x68, 0x7f, 0xc0, 0xb6, + 0x7a, 0x06, 0x0f, 0x09, 0xb5, 0xaf, 0x24, 0x40, 0xe1, 0x52, 0x15, 0xe9, 0xd1, 0xe3, 0xde, 0x88, + 0x9f, 0xce, 0x1b, 0xa7, 0xf0, 0xf3, 0x3a, 0x4c, 0x0b, 0x88, 0x43, 0xfa, 0x8a, 0xe1, 0xea, 0xdd, + 0x11, 0x87, 0x9f, 0x1d, 0x2e, 0xb1, 0x23, 0xe6, 0xd9, 0x32, 0x67, 0x38, 0x53, 0x90, 0xe6, 0xd4, + 0x0c, 0x40, 0xe1, 0x92, 0x13, 0xd2, 0x5d, 0xfa, 0x61, 0xba, 0xc7, 0x43, 0xba, 0xd7, 0xbe, 0x4a, + 0x42, 0xf9, 0x78, 0x11, 0x62, 0x7d, 0xed, 0x48, 0x93, 0xe3, 0x0f, 0xd1, 0xdd, 0xd1, 0xca, 0xa9, + 0xab, 0xec, 0xf0, 0x4a, 0x1e, 0xaf, 0x89, 0xad, 0xe5, 0xd1, 0x9a, 0xd8, 0x52, 0xd1, 0x0e, 0xe4, + 0x45, 0x37, 0x3c, 0x6c, 0x82, 0x73, 0x0b, 0xf5, 0x93, 0x4b, 0x62, 0x1d, 0x13, 0xc7, 0xeb, 0xb9, + 0xac, 0x3b, 0xa6, 0x67, 0x28, 0x97, 0xc2, 0x86, 0x48, 0x03, 0xd4, 0x35, 0x0d, 0x83, 0x74, 0x5d, + 0x7e, 0x16, 0xf0, 0xe6, 0x90, 0xa7, 0xec, 0xdd, 0x53, 0x88, 0xa6, 0x84, 0xa5, 0x81, 0x00, 0xbf, + 0xbf, 0x9d, 0xec, 0x1e, 0x27, 0x55, 0x7e, 0x23, 0x41, 0x2e, 0xa0, 0x07, 0xba, 0x04, 0xc0, 0xcc, + 0x90, 0x03, 0x69, 0x96, 0x65, 0x94, 0xad, 0xbf, 0x9a, 0x5c, 0xab, 0xfc, 0x3d, 0x4c, 0x47, 0x3a, + 0x20, 0xa2, 0x8d, 0x95, 0x22, 0xda, 0xd8, 0xc5, 0x02, 0xe4, 0x02, 0x4d, 0xf9, 0x7a, 0x32, 0x13, + 0x2f, 0x27, 0x6a, 0x8f, 0x20, 0x17, 0x68, 0x5b, 0xd0, 0x32, 0xe4, 0xc8, 0x81, 0x45, 0x73, 0x87, + 0x85, 0x86, 0xf7, 0x99, 0x11, 0x07, 0xe1, 0x4e, 0x57, 0xe9, 0x29, 0x76, 0x73, 0x00, 0xc5, 0x41, + 
0xb6, 0xd3, 0x24, 0xf2, 0x8f, 0xe3, 0x30, 0x19, 0xea, 0x7b, 0xd0, 0x5b, 0x90, 0x62, 0x65, 0xd8, + 0x5f, 0xf9, 0xea, 0x0b, 0x9a, 0xa5, 0xc0, 0xe2, 0x82, 0x09, 0xdd, 0x80, 0x94, 0x66, 0x9b, 0x9e, + 0xe5, 0xdf, 0xaa, 0x66, 0xc2, 0xec, 0x4b, 0x4c, 0x07, 0x2c, 0x70, 0xb4, 0x6e, 0xb3, 0x5f, 0x23, + 0x11, 0x04, 0x46, 0xe2, 0x01, 0x9c, 0x85, 0x1c, 0x13, 0x2e, 0x00, 0x49, 0x0e, 0x60, 0x24, 0x0e, + 0xa8, 0x40, 0xe6, 0xb1, 0x6e, 0xa8, 0xe6, 0x63, 0xa2, 0xb2, 0x4c, 0xce, 0xe0, 0xc1, 0x98, 0x32, + 0x5b, 0x8a, 0xed, 0xea, 0x4a, 0x4f, 0x56, 0x34, 0x8d, 0x15, 0xd8, 0x0c, 0x06, 0x41, 0x6a, 0x68, + 0x1a, 0x7a, 0x05, 0xca, 0xfb, 0xba, 0xa1, 0xf4, 0xf4, 0x4f, 0x88, 0x6c, 0xb3, 0x7c, 0x75, 0x58, + 0x3d, 0xcd, 0xe0, 0x92, 0x4f, 0xe7, 0x69, 0xec, 0xd4, 0xfe, 0x53, 0x82, 0xe2, 0x68, 0x7f, 0x86, + 0x16, 0x01, 0x86, 0x5e, 0x17, 0x77, 0xce, 0xd3, 0xc4, 0x2a, 0xc0, 0x85, 0x16, 0xe8, 0x51, 0x4b, + 0x5d, 0x72, 0xb2, 0xcf, 0x7c, 0x60, 0xed, 0x53, 0x09, 0x0a, 0x23, 0xad, 0x1e, 0x3d, 0x4b, 0x59, + 0xab, 0xc7, 0x94, 0x48, 0x60, 0x3e, 0xf8, 0x21, 0xb2, 0x69, 0x2e, 0x2b, 0x7b, 0xa6, 0xcd, 0x77, + 0xab, 0x63, 0x77, 0x1d, 0x71, 0xd5, 0x28, 0x0c, 0xa8, 0x3b, 0x76, 0xd7, 0xa9, 0x3d, 0x97, 0xa0, + 0x30, 0xd2, 0x2f, 0x86, 0x72, 0x4e, 0x0a, 0x6f, 0xc6, 0xf7, 0xa0, 0x24, 0x20, 0x7d, 0xc5, 0xb2, + 0x74, 0x43, 0xf3, 0xf5, 0x7a, 0xed, 0x84, 0x66, 0x54, 0x68, 0xb9, 0xc9, 0xb9, 0x70, 0xb1, 0x1b, + 0x1c, 0x3a, 0xe8, 0x0a, 0x14, 0x07, 0x4f, 0x06, 0x7b, 0x8a, 0xdb, 0x7d, 0xc0, 0xab, 0x2c, 0xce, + 0xdb, 0xfc, 0xa5, 0x60, 0x91, 0xd2, 0x2a, 0x77, 0xa0, 0x30, 0x22, 0x86, 0x9a, 0xea, 0xf7, 0x0c, + 0x86, 0x4a, 0x0e, 0x84, 0xce, 0x09, 0x5c, 0x10, 0x6d, 0x03, 0x27, 0xd6, 0xbe, 0x4c, 0x42, 0x3e, + 0xd8, 0x24, 0xa2, 0x37, 0x21, 0x19, 0xb8, 0x0d, 0x5d, 0x7b, 0x71, 0x4b, 0xc9, 0x06, 0xac, 0xa6, + 0x30, 0x26, 0xa4, 0xc0, 0x19, 0xf2, 0xb1, 0xa7, 0xf4, 0x74, 0xf7, 0x50, 0xee, 0x9a, 0x86, 0xaa, + 0xf3, 0x1a, 0xcc, 0xfd, 0x70, 0xe3, 0x04, 0x59, 0x4d, 0xc1, 0xb9, 0xe4, 0x33, 0x62, 0x44, 0x8e, + 0x93, 0x1c, 0x84, 0xa1, 0x28, 0x8e, 
0x0e, 0x3f, 0xfa, 0xfc, 0xa2, 0xfb, 0xb7, 0x27, 0x48, 0xe7, + 0xd7, 0x4d, 0x91, 0x10, 0x05, 0x2e, 0x62, 0x49, 0xa4, 0xc5, 0xf1, 0xe8, 0x26, 0xc3, 0xd1, 0x0d, + 0x47, 0x61, 0x22, 0x22, 0x0a, 0x7d, 0x98, 0x0c, 0x59, 0x81, 0xae, 0xc3, 0x64, 0x8f, 0xec, 0xfb, + 0xfa, 0xf2, 0x70, 0x88, 0xab, 0x6b, 0x89, 0x4e, 0x2c, 0x0d, 0x03, 0x82, 0x5e, 0x05, 0x64, 0xeb, + 0xda, 0x83, 0x63, 0xe0, 0x38, 0x03, 0x97, 0xd9, 0x4c, 0x00, 0x5d, 0xe9, 0x40, 0x3e, 0x68, 0x16, + 0xb5, 0x83, 0x5f, 0xb5, 0x47, 0x16, 0xc9, 0x71, 0x1a, 0x5f, 0x60, 0x68, 0x6a, 0x50, 0x74, 0x2e, + 0x90, 0x14, 0xb5, 0xd7, 0x21, 0xe3, 0x87, 0x15, 0x65, 0x61, 0xa2, 0xb5, 0xb5, 0xd5, 0xc4, 0xe5, + 0x18, 0x2a, 0x02, 0x6c, 0x34, 0x57, 0x3a, 0x72, 0x7b, 0xb7, 0xd3, 0xc4, 0x65, 0x89, 0x8e, 0x57, + 0x76, 0x37, 0x36, 0xc4, 0x38, 0x51, 0xdb, 0x07, 0x14, 0xbe, 0x2b, 0x44, 0x36, 0x5f, 0xf7, 0x01, + 0x14, 0x5b, 0x93, 0x45, 0x2d, 0x8e, 0x8f, 0x7b, 0x6d, 0xe0, 0x95, 0x45, 0x74, 0x95, 0x8a, 0xad, + 0xb1, 0x5f, 0x4e, 0xcd, 0x84, 0x33, 0x11, 0x97, 0x88, 0xd3, 0xec, 0xd0, 0x1f, 0x76, 0x10, 0xd7, + 0x7e, 0x14, 0x87, 0x34, 0xbd, 0x4c, 0x6c, 0x98, 0x1a, 0x7a, 0x1b, 0x40, 0x71, 0x5d, 0x5b, 0xdf, + 0xf3, 0xdc, 0xc1, 0x31, 0x32, 0x1b, 0x7d, 0x2f, 0x69, 0xf8, 0x38, 0x1c, 0x60, 0xa1, 0xc9, 0x40, + 0xdb, 0xe1, 0x70, 0x7c, 0x13, 0xb8, 0x44, 0x27, 0x82, 0xc9, 0xf0, 0x26, 0x54, 0xcc, 0x3d, 0x87, + 0xd8, 0x8f, 0x88, 0x2a, 0x87, 0x99, 0x12, 0x8c, 0xe9, 0x9c, 0x8f, 0xe8, 0x1c, 0x63, 0xbe, 0x06, + 0x25, 0x87, 0x3c, 0x22, 0x36, 0xdd, 0x8a, 0x86, 0xd7, 0xdf, 0x23, 0xb6, 0x78, 0x6a, 0x2c, 0xfa, + 0xe4, 0x2d, 0x46, 0x45, 0x2f, 0x43, 0x61, 0x00, 0x64, 0x97, 0xa2, 0x09, 0x16, 0xaa, 0xbc, 0x4f, + 0xec, 0x90, 0x03, 0x97, 0xaa, 0xbd, 0x67, 0xaa, 0x87, 0xa3, 0x1a, 0xa4, 0xb8, 0xda, 0x74, 0x22, + 0xb0, 0x72, 0xed, 0xb3, 0x24, 0x64, 0xd8, 0xe5, 0xcb, 0x52, 0x68, 0x4a, 0xe6, 0x68, 0x3c, 0x64, + 0xc7, 0xb5, 0x69, 0xcf, 0xce, 0xd2, 0x80, 0xde, 0xc7, 0x28, 0x71, 0x87, 0xd1, 0xd0, 0xab, 0x30, + 0xc9, 0x20, 0x61, 0x97, 0xac, 0xc5, 0x70, 0x89, 0x4e, 0x05, 0xed, 0x1a, 
0x8d, 0x40, 0xe2, 0xfb, + 0x47, 0x60, 0x19, 0xa6, 0x5d, 0x5b, 0x61, 0xfd, 0xea, 0xe8, 0x92, 0xcc, 0x3d, 0x8b, 0x93, 0x47, + 0x4f, 0x67, 0x0b, 0x1d, 0x0a, 0x68, 0x2d, 0x8b, 0x6a, 0x81, 0x18, 0xbe, 0xa5, 0x06, 0xd5, 0x68, + 0xc0, 0x94, 0x63, 0x29, 0x46, 0x48, 0xc8, 0x04, 0x13, 0xc2, 0x3a, 0x60, 0x6a, 0xff, 0x40, 0xc6, + 0x24, 0x45, 0x8f, 0x8a, 0xe8, 0xc0, 0x05, 0xb1, 0x5b, 0x23, 0x25, 0x31, 0xef, 0x2e, 0x9e, 0x3d, + 0x7a, 0x3a, 0x8b, 0xf8, 0x26, 0x1f, 0x91, 0x77, 0xce, 0x1a, 0xd2, 0x46, 0xa4, 0xbe, 0x0e, 0xe7, + 0x86, 0x17, 0xb6, 0x51, 0x89, 0x69, 0x16, 0xaf, 0xa9, 0xc1, 0x05, 0x2d, 0xc8, 0x76, 0x13, 0xa6, + 0x89, 0x11, 0x95, 0x66, 0x19, 0xc6, 0x84, 0x88, 0x11, 0xca, 0xb0, 0x4b, 0x00, 0x0f, 0x75, 0x43, + 0xe5, 0xfb, 0x98, 0x3d, 0x9a, 0x24, 0x70, 0x96, 0x52, 0xd8, 0x46, 0x5d, 0x4c, 0xf1, 0x9d, 0x5f, + 0xfb, 0x57, 0x28, 0xd1, 0x60, 0x6c, 0x12, 0xd7, 0xd6, 0xbb, 0xab, 0x8a, 0xa7, 0x11, 0x54, 0x07, + 0xb4, 0xdf, 0x33, 0x95, 0x88, 0x92, 0x48, 0x43, 0x5e, 0x66, 0x73, 0xc1, 0x95, 0xae, 0x43, 0x59, + 0x37, 0xdc, 0xe8, 0x04, 0x29, 0xea, 0x46, 0x10, 0xbb, 0x58, 0x84, 0x3c, 0x6f, 0xa9, 0x38, 0xba, + 0xf6, 0xff, 0x71, 0x98, 0x1c, 0xae, 0xbf, 0xe3, 0xf5, 0xfb, 0x8a, 0x7d, 0x48, 0xeb, 0x6c, 0xd7, + 0xf4, 0x8c, 0x28, 0x0d, 0x70, 0x99, 0xcd, 0x04, 0xd7, 0x9f, 0x83, 0xb2, 0xe3, 0xf5, 0xa3, 0xf6, + 0x6c, 0xd1, 0xf1, 0xfa, 0x41, 0xe4, 0x87, 0x50, 0xfa, 0xd8, 0xa3, 0x5d, 0x75, 0x8f, 0xf8, 0xf5, + 0x8d, 0xa7, 0xe8, 0xad, 0xe8, 0x14, 0x1d, 0xd1, 0xaa, 0xce, 0x1c, 0xd7, 0x70, 0xff, 0x41, 0x48, + 0xc0, 0x45, 0x5f, 0x16, 0x2f, 0x7d, 0x95, 0x7f, 0x86, 0xd2, 0x31, 0x08, 0x6d, 0x10, 0x7d, 0x10, + 0x53, 0x5f, 0xc2, 0x83, 0x31, 0x35, 0x32, 0xe8, 0x8a, 0x11, 0xc5, 0xcb, 0x6c, 0x26, 0xb8, 0x6d, + 0xbf, 0x88, 0x43, 0x61, 0x64, 0xd7, 0x44, 0xd6, 0xee, 0x77, 0x20, 0xc5, 0xa5, 0x8d, 0x7f, 0xef, + 0x1c, 0x11, 0x22, 0x9a, 0x9b, 0xb5, 0x18, 0x16, 0x7c, 0xe8, 0x65, 0xc8, 0xf3, 0x62, 0x20, 0x12, + 0x27, 0x21, 0x4a, 0x42, 0x8e, 0x53, 0x99, 0x81, 0x95, 0xff, 0x95, 0x20, 0x25, 0x0e, 0xb5, 0x5b, + 0x83, 0xc7, 
0x8f, 0x40, 0x5f, 0x12, 0x55, 0xb4, 0x61, 0x58, 0xb4, 0x23, 0x8f, 0xb9, 0xc4, 0xc8, + 0x31, 0x87, 0xee, 0xc2, 0xf9, 0xae, 0x62, 0xc8, 0x7b, 0x44, 0xfe, 0xc8, 0x31, 0x0d, 0x99, 0x18, + 0x5d, 0x53, 0x25, 0xaa, 0xac, 0xd8, 0xb6, 0x72, 0x28, 0xbe, 0xe0, 0x4c, 0x77, 0x15, 0x63, 0x91, + 0xac, 0x3b, 0xa6, 0xd1, 0xe4, 0xb3, 0x0d, 0x3a, 0xb9, 0x98, 0x16, 0x6f, 0x3b, 0xb5, 0x2f, 0xe3, + 0x00, 0xc3, 0x28, 0x46, 0xfa, 0xeb, 0x32, 0xbb, 0x16, 0x75, 0x6d, 0x9d, 0xdd, 0xa6, 0xc4, 0x6b, + 0x50, 0x90, 0x44, 0xb9, 0x3c, 0x43, 0x77, 0xb9, 0x1f, 0x30, 0xfb, 0x7d, 0xac, 0xc8, 0x25, 0xff, + 0x4c, 0xc7, 0xcc, 0x44, 0xf4, 0x31, 0xf3, 0x06, 0x4c, 0x68, 0x74, 0x5b, 0xce, 0x10, 0x16, 0xd1, + 0x97, 0x5e, 0x94, 0xa9, 0x6c, 0xff, 0xae, 0xc5, 0x30, 0xe7, 0x40, 0x6f, 0x43, 0xda, 0xe1, 0xb9, + 0x3b, 0xb3, 0x3f, 0xee, 0xfd, 0x39, 0x94, 0xe6, 0x6b, 0x31, 0xec, 0x73, 0xd1, 0x22, 0xa1, 0x2a, + 0xae, 0x52, 0xfb, 0x9d, 0x04, 0x88, 0x3d, 0xe6, 0x19, 0xaa, 0x65, 0xb2, 0x1d, 0x6d, 0xec, 0xeb, + 0x1a, 0x3a, 0x0f, 0x09, 0xcf, 0xee, 0x71, 0x87, 0x2e, 0xa6, 0x8f, 0x9e, 0xce, 0x26, 0x76, 0xf1, + 0x06, 0xa6, 0x34, 0xf4, 0x2e, 0xa4, 0x1f, 0x10, 0x45, 0x25, 0xb6, 0xdf, 0x41, 0xdc, 0x1c, 0xf3, + 0x3c, 0x38, 0x22, 0xb1, 0xbe, 0xc6, 0x79, 0xc4, 0x7b, 0x9e, 0x90, 0x40, 0x77, 0x91, 0x6e, 0x38, + 0xa4, 0xeb, 0xd9, 0xfe, 0xc7, 0xbb, 0xc1, 0x18, 0xcd, 0x40, 0x9a, 0x7a, 0xcc, 0xf4, 0x5c, 0x71, + 0x80, 0xfa, 0xc3, 0xca, 0x3d, 0xc8, 0x07, 0xc5, 0x7d, 0x9f, 0x57, 0xc0, 0x5a, 0x1b, 0xf2, 0x54, + 0x3b, 0x4c, 0xf8, 0xe3, 0xc9, 0x9f, 0xdc, 0x58, 0xd4, 0x7e, 0x1a, 0x87, 0xb3, 0xd1, 0xcf, 0xa1, + 0x68, 0x13, 0x4a, 0x44, 0x78, 0x81, 0x76, 0xe5, 0xfb, 0xba, 0xff, 0x09, 0xf1, 0xca, 0x69, 0x5c, + 0x86, 0x8b, 0x64, 0x34, 0x28, 0xf7, 0x20, 0x63, 0x0b, 0xb5, 0x45, 0x11, 0xa8, 0x46, 0xcb, 0xf1, + 0x8d, 0xc3, 0x03, 0x3c, 0xba, 0x03, 0xe9, 0x3e, 0xcb, 0x05, 0xbf, 0x2e, 0x5e, 0x7c, 0x51, 0xc2, + 0x60, 0x1f, 0x8c, 0x6e, 0xc0, 0x04, 0x3d, 0x24, 0xfd, 0xbd, 0x50, 0x89, 0xe6, 0xa2, 0xa7, 0x21, + 0xe6, 0x40, 0xf4, 0x1a, 0x24, 0x7b, 0xa6, 0xe6, 
0x7f, 0x7c, 0x3c, 0x1f, 0xcd, 0xb0, 0x61, 0x6a, + 0x98, 0xc1, 0x6a, 0x3f, 0x93, 0xa0, 0x7c, 0xfc, 0x2a, 0x8b, 0xde, 0x84, 0x4c, 0xd7, 0x34, 0x1c, + 0x57, 0x31, 0x5c, 0xe1, 0xb1, 0x17, 0xb7, 0xa9, 0x6b, 0x31, 0x3c, 0x60, 0x40, 0x0b, 0xc7, 0x2a, + 0xe5, 0xd8, 0xeb, 0x69, 0xa0, 0x36, 0x2e, 0x40, 0x72, 0xdf, 0x33, 0xba, 0xe2, 0x23, 0xd0, 0xc5, + 0x71, 0x8b, 0xad, 0x78, 0x46, 0x77, 0x2d, 0x86, 0x19, 0x76, 0x58, 0x8d, 0x7e, 0x1e, 0x87, 0x5c, + 0x40, 0x19, 0x34, 0x0f, 0x59, 0xba, 0xb7, 0x4e, 0x2a, 0x9b, 0x19, 0x55, 0xfc, 0x42, 0xb3, 0x00, + 0x7b, 0xa6, 0xd9, 0x93, 0x87, 0x29, 0x9b, 0x59, 0x8b, 0xe1, 0x2c, 0xa5, 0x71, 0x89, 0x2f, 0x41, + 0x4e, 0x37, 0xdc, 0x3b, 0xb7, 0x03, 0x95, 0x9b, 0x1e, 0xc1, 0xa0, 0x0f, 0xde, 0x70, 0xd1, 0x55, + 0x28, 0xb0, 0xe3, 0x7b, 0x00, 0xa2, 0x7b, 0x46, 0x5a, 0x8b, 0xe1, 0xbc, 0x20, 0x73, 0xd8, 0xf1, + 0x43, 0x60, 0x22, 0xe2, 0x10, 0x40, 0x73, 0xc0, 0x6a, 0xd5, 0x9d, 0xdb, 0xb2, 0xe1, 0x08, 0x5c, + 0x4a, 0x2c, 0x59, 0xe0, 0x13, 0x5b, 0x0e, 0x47, 0xde, 0x85, 0x82, 0xa7, 0x1b, 0xee, 0xcd, 0x85, + 0xbb, 0x02, 0xc7, 0xbf, 0xb1, 0x4c, 0x0e, 0xcd, 0xdd, 0x6d, 0xb1, 0x69, 0xf6, 0xed, 0x82, 0x23, + 0x79, 0x97, 0xe2, 0x7b, 0x6f, 0x3d, 0x99, 0xc9, 0x94, 0xb3, 0xb5, 0x6f, 0x24, 0x80, 0xa1, 0x8f, + 0x23, 0x2b, 0xfa, 0x3d, 0xc8, 0xea, 0x86, 0xee, 0xca, 0x8a, 0xad, 0x9d, 0xf2, 0xf2, 0x92, 0xa1, + 0xf8, 0x86, 0xad, 0x39, 0xe8, 0x0e, 0x24, 0x19, 0x5b, 0xe2, 0xd4, 0x2f, 0x5f, 0x0c, 0x2f, 0x3e, + 0x77, 0xf2, 0xf2, 0x13, 0xd7, 0x55, 0x74, 0x0f, 0x4a, 0x94, 0x2e, 0x0f, 0xe2, 0xcb, 0xf3, 0x3c, + 0x3a, 0xc0, 0x05, 0x0a, 0xf5, 0x47, 0x4e, 0xed, 0xf7, 0x71, 0x38, 0x13, 0xf1, 0xcc, 0x35, 0xb0, + 0x35, 0x31, 0xce, 0xd6, 0xe4, 0xf7, 0xb3, 0xf5, 0x2d, 0x61, 0x2b, 0xdf, 0x80, 0xaf, 0x9c, 0xea, + 0xad, 0xad, 0xde, 0xb0, 0xb5, 0x11, 0x93, 0x53, 0x2f, 0x32, 0x39, 0x7d, 0x4a, 0x93, 0x2b, 0xff, + 0x06, 0x89, 0x86, 0xad, 0xfd, 0xc5, 0xb7, 0xf3, 0x70, 0x6b, 0x2e, 0x0c, 0xba, 0x19, 0xea, 0x65, + 0x53, 0x25, 0xe2, 0x6a, 0xce, 0x7e, 0xd3, 0x53, 0x22, 0x78, 0x19, 0xe7, 0x83, 0xeb, 
0xbf, 0x8d, + 0x43, 0x3e, 0xf8, 0xe5, 0x19, 0x9d, 0x87, 0xe9, 0xf6, 0x76, 0x13, 0x37, 0x3a, 0x6d, 0x2c, 0x77, + 0x3e, 0xd8, 0x6e, 0xca, 0xbb, 0x5b, 0xef, 0x6e, 0xb5, 0xdf, 0xdf, 0x2a, 0xc7, 0xd0, 0x05, 0x38, + 0xbb, 0xd9, 0xdc, 0x6c, 0xe3, 0x0f, 0xe4, 0x9d, 0xf6, 0x2e, 0x5e, 0x6a, 0xca, 0x3e, 0xb0, 0xfc, + 0x3c, 0x8d, 0xce, 0xc3, 0xd4, 0x2a, 0xde, 0x5e, 0x0a, 0x4d, 0xfd, 0x2a, 0x43, 0xa7, 0xe8, 0x9d, + 0x3d, 0x34, 0xf5, 0x45, 0x16, 0x55, 0x60, 0xba, 0xb9, 0xb9, 0xdd, 0x09, 0x4b, 0xfc, 0x6f, 0x40, + 0x93, 0x90, 0xdf, 0x6c, 0x6c, 0x0f, 0x49, 0x4f, 0x4a, 0xe8, 0x1c, 0xa0, 0xc6, 0xea, 0x2a, 0x6e, + 0xae, 0x36, 0x3a, 0x01, 0xec, 0x4f, 0xca, 0x68, 0x0a, 0x4a, 0x2b, 0xad, 0x8d, 0x4e, 0x13, 0x0f, + 0xa9, 0xff, 0x33, 0x89, 0xce, 0x40, 0x71, 0xa3, 0xb5, 0xd9, 0xea, 0x0c, 0x89, 0x7f, 0x60, 0xc4, + 0xdd, 0xad, 0x56, 0x7b, 0x6b, 0x48, 0xfc, 0x06, 0x21, 0x04, 0x85, 0xf5, 0x76, 0x2b, 0x40, 0xfb, + 0xc5, 0x19, 0xaa, 0xb6, 0x6f, 0x6e, 0x6b, 0xeb, 0xdd, 0xe1, 0xd4, 0xe7, 0x2b, 0x54, 0x0f, 0x6e, + 0xec, 0xc8, 0xc4, 0x67, 0xab, 0xa8, 0x0a, 0xe7, 0xdb, 0x9d, 0xe6, 0x86, 0xdc, 0xfc, 0xc7, 0xed, + 0x36, 0xee, 0x1c, 0x9b, 0xff, 0x6e, 0x75, 0xf1, 0xfe, 0x93, 0x67, 0xd5, 0xd8, 0xd7, 0xcf, 0xaa, + 0xb1, 0xef, 0x9e, 0x55, 0xa5, 0x4f, 0x8f, 0xaa, 0xd2, 0xe7, 0x47, 0x55, 0xe9, 0x97, 0x47, 0x55, + 0xe9, 0xc9, 0x51, 0x55, 0xfa, 0xe6, 0xa8, 0x2a, 0x3d, 0x3f, 0xaa, 0xc6, 0xbe, 0x3b, 0xaa, 0x4a, + 0xff, 0xf5, 0x6d, 0x35, 0xf6, 0xe4, 0xdb, 0x6a, 0xec, 0xeb, 0x6f, 0xab, 0xb1, 0x7f, 0x4a, 0xf1, + 0xd0, 0xef, 0xa5, 0xd8, 0xf7, 0xac, 0x5b, 0x7f, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xb9, 0x47, + 0x05, 0xdc, 0x24, 0x00, 0x00, } func (x OperatorType) String() string { @@ -3771,6 +3783,14 @@ func (this *Operator) Equal(that interface{}) bool { } else if !this.Op.Equal(that1.Op) { return false } + if len(this.Context) != len(that1.Context) { + return false + } + for i := range this.Context { + if this.Context[i] != that1.Context[i] { + return false + } + } return true } func (this *Operator_MemSourceOp) 
Equal(that interface{}) bool { @@ -6005,12 +6025,25 @@ func (this *Operator) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 18) + s := make([]string, 0, 19) s = append(s, "&planpb.Operator{") s = append(s, "OpType: "+fmt.Sprintf("%#v", this.OpType)+",\n") if this.Op != nil { s = append(s, "Op: "+fmt.Sprintf("%#v", this.Op)+",\n") } + keysForContext := make([]string, 0, len(this.Context)) + for k, _ := range this.Context { + keysForContext = append(keysForContext, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForContext) + mapStringForContext := "map[string]string{" + for _, k := range keysForContext { + mapStringForContext += fmt.Sprintf("%#v: %#v,", k, this.Context[k]) + } + mapStringForContext += "}" + if this.Context != nil { + s = append(s, "Context: "+mapStringForContext+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -7190,6 +7223,25 @@ func (m *Operator) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } } + if len(m.Context) > 0 { + for k := range m.Context { + v := m.Context[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintPlan(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintPlan(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintPlan(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } if m.OpType != 0 { i = encodeVarintPlan(dAtA, i, uint64(m.OpType)) i-- @@ -9871,6 +9923,14 @@ func (m *Operator) Size() (n int) { if m.Op != nil { n += m.Op.Size() } + if len(m.Context) > 0 { + for k, v := range m.Context { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovPlan(uint64(len(k))) + 1 + len(v) + sovPlan(uint64(len(v))) + n += mapEntrySize + 1 + sovPlan(uint64(mapEntrySize)) + } + } return n } @@ -11172,9 +11232,20 @@ func (this *Operator) String() string { if this == nil { return "nil" } + keysForContext := make([]string, 0, len(this.Context)) + for k, _ := range this.Context { + keysForContext = append(keysForContext, 
k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForContext) + mapStringForContext := "map[string]string{" + for _, k := range keysForContext { + mapStringForContext += fmt.Sprintf("%v: %v,", k, this.Context[k]) + } + mapStringForContext += "}" s := strings.Join([]string{`&Operator{`, `OpType:` + fmt.Sprintf("%v", this.OpType) + `,`, `Op:` + fmt.Sprintf("%v", this.Op) + `,`, + `Context:` + mapStringForContext + `,`, `}`, }, "") return s @@ -13522,6 +13593,133 @@ func (m *Operator) Unmarshal(dAtA []byte) error { } m.Op = &Operator_OTelSinkOp{v} iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Context == nil { + m.Context = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthPlan + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthPlan + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthPlan + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthPlan + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Context[mapkey] = mapvalue + iNdEx = postIndex case 1000: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field GRPCSinkOp", wireType) diff --git a/src/carnot/planpb/plan.proto b/src/carnot/planpb/plan.proto index c7bcb552dda..e1f91935fa7 100644 --- a/src/carnot/planpb/plan.proto +++ b/src/carnot/planpb/plan.proto @@ -115,6 +115,7 @@ enum OperatorType { MEMORY_SINK_OPERATOR = 9000; GRPC_SINK_OPERATOR = 9100; OTEL_EXPORT_SINK_OPERATOR = 9200; + // TODO(ddelnano): 10001 - 11000 are reserved for future use (sink_results table) } // The Logical operation performed. 
Each operator needs and entry in this @@ -150,6 +151,7 @@ message Operator { // OTelExportSinkOperator writes the input table to an OpenTelemetry endpoint. OTelExportSinkOperator otel_sink_op = 14 [ (gogoproto.customname) = "OTelSinkOp" ]; } + map context = 15; } // Fetches data from in-memory source. diff --git a/src/carnot/planpb/test_proto.h b/src/carnot/planpb/test_proto.h index 0ca5a1c37a4..a8a6cf14745 100644 --- a/src/carnot/planpb/test_proto.h +++ b/src/carnot/planpb/test_proto.h @@ -1024,6 +1024,72 @@ constexpr char kPlanWithTwoSourcesWithLimits[] = R"proto( } )proto"; +constexpr char kPlanWithOTelExport[] = R"proto( + id: 1, + dag { + nodes { + id: 1 + sorted_children: 2 + } + nodes { + id: 2 + sorted_parents: 1 + } + } + nodes { + id: 1 + op { + op_type: MEMORY_SOURCE_OPERATOR + context: { + key: "mutation_id" + value: "mutation" + } + mem_source_op { + name: "numbers" + column_idxs: 0 + column_types: INT64 + column_names: "a" + column_idxs: 1 + column_types: BOOLEAN + column_names: "b" + column_idxs: 2 + column_types: FLOAT64 + column_names: "c" + } + } + } + nodes { + id: 2 + op { + op_type: OTEL_EXPORT_SINK_OPERATOR + context: { + key: "mutation_id" + value: "mutation" + } + otel_sink_op { + endpoint_config { + url: "0.0.0.0:55690" + headers { + key: "apikey" + value: "12345" + } + timeout: 5 + } + resource { + attributes { + name: "service.name" + column { + column_type: STRING + column_index: 1 + can_be_json_encoded_array: true + } + } + } + } + } + } +)proto"; + constexpr char kOneLimit3Sources[] = R"proto( id: 1, dag { diff --git a/src/common/json/json.h b/src/common/json/json.h index 7dab5ceef7e..d4e38338d2d 100644 --- a/src/common/json/json.h +++ b/src/common/json/json.h @@ -126,6 +126,27 @@ std::string ToJSONString(const T& x) { return sb.GetString(); } +inline std::string RapidJSONTypeToString(rapidjson::Type type) { + switch (type) { + case rapidjson::kNullType: + return "Null"; + case rapidjson::kFalseType: + return "False"; + case 
rapidjson::kTrueType: + return "True"; + case rapidjson::kObjectType: + return "Object"; + case rapidjson::kArrayType: + return "Array"; + case rapidjson::kStringType: + return "String"; + case rapidjson::kNumberType: + return "Number"; + default: + return "Unknown"; + } +} + /* * Exposes a limited set of APIs to build JSON string, with mixed data structures; which could not * be processed by the above ToJSONString(). diff --git a/src/common/testing/protobuf.h b/src/common/testing/protobuf.h index dfd6091a4e6..07da54be26a 100644 --- a/src/common/testing/protobuf.h +++ b/src/common/testing/protobuf.h @@ -66,7 +66,7 @@ struct ProtoMatcher { } virtual void DescribeTo(::std::ostream* os) const { - *os << "equals to text probobuf: " << expected_text_pb_; + *os << "equals to text protobuf: " << expected_text_pb_; } virtual void DescribeNegationTo(::std::ostream* os) const { @@ -97,7 +97,7 @@ struct PartiallyEqualsProtoMatcher : public ProtoMatcher { } void DescribeTo(::std::ostream* os) const override { - *os << "partially equals to text probobuf: " << expected_text_pb_; + *os << "partially equals to text protobuf: " << expected_text_pb_; } void DescribeNegationTo(::std::ostream* os) const override { diff --git a/src/common/uuid/uuid_utils.h b/src/common/uuid/uuid_utils.h index 90207d75491..792a79453e3 100644 --- a/src/common/uuid/uuid_utils.h +++ b/src/common/uuid/uuid_utils.h @@ -49,6 +49,10 @@ inline void ClearUUID(sole::uuid* uuid) { uuid->cd = 0; } +inline bool operator==(const px::uuidpb::UUID& lhs, const px::uuidpb::UUID& rhs) { + return lhs.low_bits() == rhs.low_bits() && lhs.high_bits() == rhs.high_bits(); +} + } // namespace px // Allow UUID to be logged. 
diff --git a/src/experimental/standalone_pem/file_source_manager.cc b/src/experimental/standalone_pem/file_source_manager.cc new file mode 100644 index 00000000000..11727480abd --- /dev/null +++ b/src/experimental/standalone_pem/file_source_manager.cc @@ -0,0 +1,195 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "src/common/base/base.h" +#include "src/experimental/standalone_pem/file_source_manager.h" + +constexpr auto kUpdateInterval = std::chrono::seconds(2); + +namespace px { +namespace vizier { +namespace agent { + +FileSourceManager::FileSourceManager(px::event::Dispatcher* dispatcher, + stirling::Stirling* stirling, + table_store::TableStore* table_store) + : dispatcher_(dispatcher), stirling_(stirling), table_store_(table_store) { + file_source_monitor_timer_ = + dispatcher_->CreateTimer(std::bind(&FileSourceManager::Monitor, this)); + // Kick off the background monitor. 
+ file_source_monitor_timer_->EnableTimer(kUpdateInterval); +} + +std::string FileSourceManager::DebugString() const { + std::lock_guard lock(mu_); + std::stringstream ss; + auto now = std::chrono::steady_clock::now(); + ss << absl::Substitute("File Source Manager Debug State:\n"); + ss << absl::Substitute("ID\tNAME\tCURRENT_STATE\tEXPECTED_STATE\tlast_updated\n"); + for (const auto& [id, file_source] : file_sources_) { + ss << absl::Substitute( + "$0\t$1\t$2\t$3\t$4 seconds\n", id.str(), file_source.name, + statuspb::LifeCycleState_Name(file_source.current_state), + statuspb::LifeCycleState_Name(file_source.expected_state), + std::chrono::duration_cast(now - file_source.last_updated_at) + .count()); + } + return ss.str(); +} + +Status FileSourceManager::HandleRegisterFileSourceRequest(sole::uuid id, std::string file_name) { + LOG(INFO) << "Registering file source: " << file_name; + + FileSourceInfo info; + info.name = file_name; + info.id = id; + info.expected_state = statuspb::RUNNING_STATE; + info.current_state = statuspb::PENDING_STATE; + info.last_updated_at = dispatcher_->GetTimeSource().MonotonicTime(); + stirling_->RegisterFileSource(id, file_name); + { + std::lock_guard lock(mu_); + file_sources_[id] = std::move(info); + file_source_name_map_[file_name] = id; + } + return Status::OK(); +} + +Status FileSourceManager::HandleRemoveFileSourceRequest( + sole::uuid id, const messages::FileSourceMessage& /*msg*/) { + std::lock_guard lock(mu_); + auto it = file_sources_.find(id); + if (it == file_sources_.end()) { + return error::NotFound("File source with ID: $0, not found", id.str()); + } + + it->second.expected_state = statuspb::TERMINATED_STATE; + return stirling_->RemoveFileSource(id); +} + +void FileSourceManager::Monitor() { + std::lock_guard lock(mu_); + + for (auto& [id, file_source] : file_sources_) { + auto s_or_publish = stirling_->GetFileSourceInfo(id); + statuspb::LifeCycleState current_state; + // Get the latest current state according to stirling. 
+ if (s_or_publish.ok()) { + current_state = statuspb::RUNNING_STATE; + } else { + switch (s_or_publish.code()) { + case statuspb::FAILED_PRECONDITION: + // Means the binary has not been found. + current_state = statuspb::FAILED_STATE; + break; + case statuspb::RESOURCE_UNAVAILABLE: + current_state = statuspb::PENDING_STATE; + break; + case statuspb::NOT_FOUND: + // Means we didn't actually find the probe. If we requested termination, + // it's because the probe has been removed. + current_state = (file_source.expected_state == statuspb::TERMINATED_STATE) + ? statuspb::TERMINATED_STATE + : statuspb::UNKNOWN_STATE; + break; + default: + current_state = statuspb::FAILED_STATE; + break; + } + } + + if (current_state != statuspb::RUNNING_STATE && + file_source.expected_state == statuspb::TERMINATED_STATE) { + current_state = statuspb::TERMINATED_STATE; + } + + if (current_state == file_source.current_state) { + // No state transition, nothing to do. + continue; + } + + // The following transitions are legal: + // 1. Pending -> Terminated: Probe is stopped before starting. + // 2. Pending -> Running : Probe starts up. + // 3. Running -> Terminated: Probe is stopped. + // 4. Running -> Failed: Probe got dettached because binary died. + // 5. Failed -> Running: Probe started up because binary came back to life. + // + // In all cases we basically inform the MDS. + // In the cases where we transition to running, we need to update the schemas. + + Status probe_status = Status::OK(); + LOG(INFO) << absl::Substitute("File source[$0]::$1 has transitioned $2 -> $3", id.str(), + file_source.name, + statuspb::LifeCycleState_Name(file_source.current_state), + statuspb::LifeCycleState_Name(current_state)); + // Check if running now, then update the schema. + if (current_state == statuspb::RUNNING_STATE) { + // We must have just transitioned into running. We try to apply the new schema. + // If it fails we will trigger an error and report that to MDS. 
+ auto publish_pb = s_or_publish.ConsumeValueOrDie(); + auto s = UpdateSchema(publish_pb); + if (!s.ok()) { + current_state = statuspb::FAILED_STATE; + probe_status = s; + } + } else { + probe_status = s_or_publish.status(); + } + + file_source.current_state = current_state; + } + file_source_monitor_timer_->EnableTimer(kUpdateInterval); +} + +Status FileSourceManager::UpdateSchema(const stirling::stirlingpb::Publish& publish_pb) { + LOG(INFO) << "Updating schema for file source"; + auto relation_info_vec = ConvertPublishPBToRelationInfo(publish_pb); + + // TODO(ddelnano): Failure here can lead to an inconsistent schema state. We should + // figure out how to handle this as part of the data model refactor project. + for (const auto& relation_info : relation_info_vec) { + LOG(INFO) << absl::Substitute("Adding table: $0", relation_info.name); + table_store_->AddTable( + table_store::HotOnlyTable::Create(relation_info.name, relation_info.relation), + relation_info.name, relation_info.id); + } + return Status::OK(); +} + +FileSourceInfo* FileSourceManager::GetFileSourceInfo(std::string name) { + std::lock_guard lock(mu_); + auto pair = file_source_name_map_.find(name); + if (pair == file_source_name_map_.end()) { + return nullptr; + } + + auto id_pair = file_sources_.find(pair->second); + if (id_pair == file_sources_.end()) { + return nullptr; + } + + return &id_pair->second; +} + +} // namespace agent +} // namespace vizier +} // namespace px diff --git a/src/experimental/standalone_pem/file_source_manager.h b/src/experimental/standalone_pem/file_source_manager.h new file mode 100644 index 00000000000..7e426bc69be --- /dev/null +++ b/src/experimental/standalone_pem/file_source_manager.h @@ -0,0 +1,71 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include + +#include + +#include "src/stirling/stirling.h" +#include "src/vizier/services/agent/shared/manager/manager.h" + +namespace px { +namespace vizier { +namespace agent { + +struct FileSourceInfo { + std::string name; + sole::uuid id; + statuspb::LifeCycleState expected_state; + statuspb::LifeCycleState current_state; + std::chrono::time_point last_updated_at; +}; + +class FileSourceManager { + public: + FileSourceManager() = delete; + FileSourceManager(px::event::Dispatcher* dispatcher, stirling::Stirling* stirling, + table_store::TableStore* table_store); + + std::string DebugString() const; + Status HandleRegisterFileSourceRequest(sole::uuid id, std::string file_name); + Status HandleRemoveFileSourceRequest(sole::uuid id, const messages::FileSourceMessage& req); + FileSourceInfo* GetFileSourceInfo(std::string name); + + private: + // The tracepoint Monitor that is responsible for watching and updating the state of + // active tracepoints. + void Monitor(); + Status UpdateSchema(const stirling::stirlingpb::Publish& publish_proto); + + px::event::Dispatcher* dispatcher_; + stirling::Stirling* stirling_; + table_store::TableStore* table_store_; + + event::TimerUPtr file_source_monitor_timer_; + mutable std::mutex mu_; + absl::flat_hash_map file_sources_; + // File source name to UUID. 
+ absl::flat_hash_map file_source_name_map_; +}; + +} // namespace agent +} // namespace vizier +} // namespace px diff --git a/src/experimental/standalone_pem/standalone_pem_manager.cc b/src/experimental/standalone_pem/standalone_pem_manager.cc index d1257dbdbfd..312b189d327 100644 --- a/src/experimental/standalone_pem/standalone_pem_manager.cc +++ b/src/experimental/standalone_pem/standalone_pem_manager.cc @@ -73,6 +73,7 @@ StandalonePEMManager::StandalonePEMManager(sole::uuid agent_id, std::string_view dispatcher_(api_->AllocateDispatcher("manager")), table_store_(std::make_shared()), func_context_(this, /* mds_stub= */ nullptr, /* mdtp_stub= */ nullptr, + /* mdfs_stub= */ nullptr, /* cronscript_stub= */ nullptr, table_store_, [](grpc::ClientContext*) {}), stirling_(px::stirling::Stirling::Create(px::stirling::CreateSourceRegistryFromFlag())), results_sink_server_(std::make_unique()) { @@ -102,11 +103,16 @@ StandalonePEMManager::StandalonePEMManager(sole::uuid agent_id, std::string_view std::move(clients_config), std::move(server_config)) .ConsumeValueOrDie(); + const std::string proc_pid_path = std::string("/proc/") + std::to_string(info_.pid); + PX_ASSIGN_OR_RETURN(auto start_time, system::GetPIDStartTimeTicks(proc_pid_path)); + mds_manager_ = std::make_unique( - info_.hostname, info_.asid, info_.pid, info_.agent_id, time_system_.get()); + info_.hostname, info_.asid, info_.pid, start_time, info_.agent_id, time_system_.get()); tracepoint_manager_ = std::make_unique(dispatcher_.get(), stirling_.get(), table_store_.get()); + file_source_manager_ = + std::make_unique(dispatcher_.get(), stirling_.get(), table_store_.get()); // Force Metadata Update. 
ECHECK_OK(mds_manager_->PerformMetadataStateUpdate()); } @@ -146,9 +152,9 @@ Status StandalonePEMManager::Init() { stirling_->RegisterAgentMetadataCallback( std::bind(&px::md::AgentMetadataStateManager::CurrentAgentMetadataState, mds_manager_.get())); - vizier_grpc_server_ = - std::make_unique(port_, carnot_.get(), results_sink_server_.get(), - carnot_->GetEngineState(), tracepoint_manager_.get()); + vizier_grpc_server_ = std::make_unique( + port_, carnot_.get(), results_sink_server_.get(), carnot_->GetEngineState(), + tracepoint_manager_.get(), file_source_manager_.get()); return Status::OK(); } @@ -211,20 +217,20 @@ Status StandalonePEMManager::InitSchemas() { // Special case to set the max size of the http_events table differently from the other // tables. For now, the min cold batch size is set to 256kB to be consistent with previous // behaviour. - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - http_table_size, 256 * 1024); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, http_table_size, 256 * 1024); } else if (relation_info.name == "stirling_error") { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - stirling_error_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, stirling_error_table_size); } else if (relation_info.name == "probe_status") { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - probe_status_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, probe_status_table_size); } else if (relation_info.name == "proc_exit_events") { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - proc_exit_events_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, proc_exit_events_table_size); } else { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - other_table_size); + table_ptr = 
std::make_shared( + relation_info.name, relation_info.relation, other_table_size); } table_store_->AddTable(std::move(table_ptr), relation_info.name, relation_info.id); diff --git a/src/experimental/standalone_pem/standalone_pem_manager.h b/src/experimental/standalone_pem/standalone_pem_manager.h index 9d658b1306a..99af95bddbc 100644 --- a/src/experimental/standalone_pem/standalone_pem_manager.h +++ b/src/experimental/standalone_pem/standalone_pem_manager.h @@ -23,6 +23,7 @@ #include "src/carnot/carnot.h" #include "src/common/event/event.h" +#include "src/experimental/standalone_pem/file_source_manager.h" #include "src/experimental/standalone_pem/sink_server.h" #include "src/experimental/standalone_pem/tracepoint_manager.h" #include "src/experimental/standalone_pem/vizier_server.h" @@ -87,6 +88,9 @@ class StandalonePEMManager : public BaseManager { // Tracepoints std::unique_ptr tracepoint_manager_; + + // FileSource manager + std::unique_ptr file_source_manager_; }; } // namespace agent diff --git a/src/experimental/standalone_pem/tracepoint_manager.cc b/src/experimental/standalone_pem/tracepoint_manager.cc index f05772f0a04..240050d74b9 100644 --- a/src/experimental/standalone_pem/tracepoint_manager.cc +++ b/src/experimental/standalone_pem/tracepoint_manager.cc @@ -178,8 +178,9 @@ Status TracepointManager::UpdateSchema(const stirling::stirlingpb::Publish& publ // TODO(zasgar): Failure here can lead to an inconsistent schema state. We should // // figure out how to handle this as part of the data model refactor project. 
for (const auto& relation_info : relation_info_vec) { - table_store_->AddTable(table_store::Table::Create(relation_info.name, relation_info.relation), - relation_info.name, relation_info.id); + table_store_->AddTable( + table_store::HotColdTable::Create(relation_info.name, relation_info.relation), + relation_info.name, relation_info.id); } return Status::OK(); } diff --git a/src/experimental/standalone_pem/vizier_server.h b/src/experimental/standalone_pem/vizier_server.h index ce071bf379c..1968e0fe96d 100644 --- a/src/experimental/standalone_pem/vizier_server.h +++ b/src/experimental/standalone_pem/vizier_server.h @@ -50,11 +50,13 @@ class VizierServer final : public api::vizierpb::VizierService::Service { public: VizierServer() = delete; VizierServer(carnot::Carnot* carnot, px::vizier::agent::StandaloneGRPCResultSinkServer* svr, - px::carnot::EngineState* engine_state, TracepointManager* tp_manager) { + px::carnot::EngineState* engine_state, TracepointManager* tp_manager, + FileSourceManager* file_source_manager) { carnot_ = carnot; sink_server_ = svr; engine_state_ = engine_state; tp_manager_ = tp_manager; + file_source_manager_ = file_source_manager; } ::grpc::Status ExecuteScript( @@ -63,6 +65,7 @@ class VizierServer final : public api::vizierpb::VizierService::Service { LOG(INFO) << "Executing Script"; auto query_id = sole::uuid4(); + auto compiler_state = engine_state_->CreateLocalExecutionCompilerState(0); // Handle mutations. 
@@ -79,8 +82,10 @@ class VizierServer final : public api::vizierpb::VizierService::Service { auto mutations = mutations_or_s.ConsumeValueOrDie(); auto deployments = mutations->Deployments(); + auto file_source_deployments = mutations->FileSourceDeployments(); bool tracepoints_running = true; + auto ntp_info = TracepointInfo{}; for (size_t i = 0; i < deployments.size(); i++) { carnot::planner::dynamic_tracing::ir::logical::TracepointDeployment planner_tp; auto s = deployments[i]->ToProto(&planner_tp); @@ -99,7 +104,6 @@ class VizierServer final : public api::vizierpb::VizierService::Service { if (!s.ok()) { return ::grpc::Status(grpc::StatusCode::INTERNAL, "Failed to register tracepoint"); } - auto ntp_info = TracepointInfo{}; ntp_info.name = stirling_tp.name(); ntp_info.id = tp_id; ntp_info.current_state = statuspb::PENDING_STATE; @@ -117,9 +121,34 @@ class VizierServer final : public api::vizierpb::VizierService::Service { return ::grpc::Status::CANCELLED; } - auto m_info = mutation_resp.mutable_mutation_info(); - m_info->mutable_status()->set_code(0); - response->Write(mutation_resp); + auto file_sources_running = true; + auto nfile_source_info = FileSourceInfo{}; + for (size_t i = 0; i < file_source_deployments.size(); i++) { + auto file_source = file_source_deployments[i]; + auto file_source_info = file_source_manager_->GetFileSourceInfo(file_source.glob_pattern()); + if (file_source_info == nullptr) { + auto s = file_source_manager_->HandleRegisterFileSourceRequest( + sole::uuid4(), file_source.glob_pattern()); + if (!s.ok()) { + return ::grpc::Status(grpc::StatusCode::INTERNAL, "Failed to register file source"); + } + nfile_source_info.name = file_source.glob_pattern(); + nfile_source_info.current_state = statuspb::PENDING_STATE; + file_source_info = &nfile_source_info; + } + if (file_source_info->current_state != statuspb::RUNNING_STATE) { + file_sources_running = false; + } + } + if (!file_sources_running) { + auto m_info = 
mutation_resp.mutable_mutation_info(); + m_info->mutable_status()->set_code(grpc::StatusCode::UNAVAILABLE); + response->Write(mutation_resp); + return ::grpc::Status::CANCELLED; + } + /* auto m_info = mutation_resp.mutable_mutation_info(); */ + /* m_info->mutable_status()->set_code(0); */ + /* response->Write(mutation_resp); */ } LOG(INFO) << "Compiling and running query"; // Send schema before sending query results. @@ -201,6 +230,7 @@ class VizierServer final : public api::vizierpb::VizierService::Service { px::vizier::agent::StandaloneGRPCResultSinkServer* sink_server_; px::carnot::EngineState* engine_state_; TracepointManager* tp_manager_; + FileSourceManager* file_source_manager_; }; class VizierGRPCServer { @@ -208,8 +238,10 @@ class VizierGRPCServer { VizierGRPCServer() = delete; VizierGRPCServer(int port, carnot::Carnot* carnot, px::vizier::agent::StandaloneGRPCResultSinkServer* svr, - carnot::EngineState* engine_state, TracepointManager* tp_manager) - : vizier_server_(std::make_unique(carnot, svr, engine_state, tp_manager)) { + carnot::EngineState* engine_state, TracepointManager* tp_manager, + FileSourceManager* file_source_manager) + : vizier_server_(std::make_unique(carnot, svr, engine_state, tp_manager, + file_source_manager)) { grpc::ServerBuilder builder; std::string uri = absl::Substitute("0.0.0.0:$0", port); diff --git a/src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml b/src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml new file mode 100644 index 00000000000..178d1cc9659 --- /dev/null +++ b/src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml @@ -0,0 +1,4 @@ +--- +short: Overview of Pipeline throughput +long: > + This view displays a summary of the throughput of the pipeline. 
diff --git a/src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl b/src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl new file mode 100644 index 00000000000..f8dc4466a4b --- /dev/null +++ b/src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl @@ -0,0 +1,82 @@ +import px + +kelvin_dest = "unknown" +bpf_source_op_start = 10000 +memory_source_op = 1000 # This corresponds to a file source +file_source_op = 2 +# TODO(ddelnano): This currently can't tell the difference +# between an internal and external grpc sink. +grpc_sink_op = 9100 +otel_export_op = 9200 + +def final_dest_to_str(dest): + return px.select(dest == otel_export_op, "Otel Export", kelvin_dest) + +def get_memory_source_sink_results(df, min_asid): + file_sources = px.GetFileSourceStatus() + file_sources.stream_id = file_sources.file_source_id + + tracepoint_sources = px.GetTracepointStatus() + tracepoint_sources.stream_id = tracepoint_sources.tracepoint_id_str + + df = df[df.destination > bpf_source_op_start or df.destination == memory_source_op] + file_sources_df = df.merge(file_sources, how='left', left_on='stream_id', right_on='file_source_id') + file_sources_df = file_sources_df['time_', 'upid', 'pod', 'name', 'bytes_transferred', 'destination', 'stream_id_x', 'stream_id_y', 'match'] + tracepoint_sources_df = df.merge(tracepoint_sources, how='left', left_on='stream_id', right_on='tracepoint_id_str') + tracepoint_sources_df = tracepoint_sources_df['time_', 'upid', 'pod', 'name', 'bytes_transferred', 'destination', 'stream_id_x', 'stream_id_y', 'match'] + + df = file_sources_df.append(tracepoint_sources_df) + + # stream_id_y is the column from the file_sources UDTF after the merge + df.is_bpf_source = df.stream_id_y == "" + df = df.merge(min_asid, how='left', left_on='match', right_on='match') + + df.to_entity = df.pod + df.from_entity = px.select(df.is_bpf_source, px.pipeline_dest_to_name(df.destination), df.name) + " " + px.itoa(px.upid_to_asid(df.upid) - df.min_asid) + 
df = df['time_', 'from_entity', 'to_entity', 'bytes_transferred'] + df = df.groupby(['from_entity', 'to_entity']).agg( + total_bytes=('bytes_transferred', px.sum), + ) + + return df + +def pipeline_flow_graph(start_time: str): + agents = px.GetAgentStatus() + kelvin = agents[px.contains(agents.hostname, "kelvin")] + min_asid = agents.agg(min_asid=('asid', px.min)) + min_asid.match = True + + df = px.DataFrame('sink_results', start_time=start_time) + df.pod = df.ctx['pod'] + df.match = True + + mem_source_sink_results = get_memory_source_sink_results(df, min_asid) + + df = df[df.destination == otel_export_op or df.destination == grpc_sink_op] + df.final_dest = final_dest_to_str(df.destination) + + # Use a dummy column that matches in both data frames + # so the Kelvin hostname join works as expected + kelvin.match = True + + # For external GRPC sinks, df.pod will be empty and kelvin_dest will be "unknown" + df.is_dest_kelvin = px.select(df.final_dest == kelvin_dest and df.pod != "", True, False) + df.final_dest = px.select(not df.is_dest_kelvin and df.final_dest == kelvin_dest, "px.display", df.final_dest) + df = df.merge(kelvin, how='left', left_on='match', right_on='match') + # Remove the port from the ip_address column from the GetAgentStatus UDTF + df.ip_address = px.pluck_array(px.split(df.ip_address, ":"), 0) + df.kelvin_pod = px.pod_id_to_pod_name(px.ip_to_pod_id(df.ip_address)) + + df.from_entity = px.select(df.is_dest_kelvin, df.pod, df.kelvin_pod) + df.to_entity = px.select(df.is_dest_kelvin, df.kelvin_pod, df.final_dest) + + df = df.groupby(['from_entity', 'to_entity']).agg( + total_bytes=('bytes_transferred', px.sum), + ) + + df = df.append(mem_source_sink_results) + df = df[px.substring(df.from_entity, 0, 7) != "unknown"] + df.total_time = px.abs(px.parse_duration(start_time)) / px.pow(10, 9) + df.bytes_throughput = df.total_bytes / df.total_time + return df + diff --git a/src/pxl_scripts/px/pipeline_flow_graph/vis.json 
b/src/pxl_scripts/px/pipeline_flow_graph/vis.json new file mode 100644 index 00000000000..aba41d05c23 --- /dev/null +++ b/src/pxl_scripts/px/pipeline_flow_graph/vis.json @@ -0,0 +1,49 @@ +{ + "variables": [ + { + "name": "start_time", + "type": "PX_STRING", + "description": "The start time of the window in time units before now.", + "defaultValue": "-5m" + } + ], + "globalFuncs": [ + { + "outputName": "pipeline_flow", + "func": { + "name": "pipeline_flow_graph", + "args": [ + { + "name": "start_time", + "variable": "start_time" + } + ] + } + } + ], + "widgets": [ + { + "name": "Pipeline Flow Graph", + "position": { + "x": 0, + "y": 0, + "w": 12, + "h": 4 + }, + "globalFuncOutputName": "pipeline_flow", + "displaySpec": { + "@type": "types.px.dev/px.vispb.Graph", + "adjacencyList": { + "fromColumn": "from_entity", + "toColumn": "to_entity" + }, + "edgeWeightColumn": "bytes_throughput", + "edgeHoverInfo": [ + "bytes_throughput" + ], + "enableDefaultHierarchy": true, + "edgeLength": 500 + } + } + ] +} diff --git a/src/shared/metadata/metadata_state.cc b/src/shared/metadata/metadata_state.cc index 098d95179c5..e09f7fa7301 100644 --- a/src/shared/metadata/metadata_state.cc +++ b/src/shared/metadata/metadata_state.cc @@ -569,7 +569,7 @@ Status K8sMetadataState::CleanupExpiredMetadata(int64_t now, int64_t retention_t std::shared_ptr AgentMetadataState::CloneToShared() const { auto state = - std::make_shared(hostname_, asid_, pid_, agent_id_, pod_name_, vizier_id_, + std::make_shared(hostname_, asid_, pid_, start_time_, agent_id_, pod_name_, vizier_id_, vizier_name_, vizier_namespace_, time_system_); state->last_update_ts_ns_ = last_update_ts_ns_; state->epoch_id_ = epoch_id_; diff --git a/src/shared/metadata/metadata_state.h b/src/shared/metadata/metadata_state.h index e2fdc9e6c86..95957de23dc 100644 --- a/src/shared/metadata/metadata_state.h +++ b/src/shared/metadata/metadata_state.h @@ -341,13 +341,14 @@ class K8sMetadataState : NotCopyable { class AgentMetadataState : 
NotCopyable { public: AgentMetadataState() = delete; - AgentMetadataState(std::string_view hostname, uint32_t asid, uint32_t pid, AgentID agent_id, + AgentMetadataState(std::string_view hostname, uint32_t asid, uint32_t pid, uint64_t start_time, AgentID agent_id, std::string_view pod_name, sole::uuid vizier_id, std::string_view vizier_name, std::string_view vizier_namespace, event::TimeSystem* time_system) : hostname_(std::string(hostname)), pod_name_(std::string(pod_name)), asid_(asid), pid_(pid), + start_time_(start_time), agent_id_(agent_id), vizier_id_(vizier_id), vizier_name_(std::string(vizier_name)), @@ -360,6 +361,7 @@ class AgentMetadataState : NotCopyable { uint32_t pid() const { return pid_; } const std::string& pod_name() const { return pod_name_; } const sole::uuid& agent_id() const { return agent_id_; } + const md::UPID agent_upid() const { return md::UPID(asid_, pid_, start_time_); } const sole::uuid& vizier_id() const { return vizier_id_; } const std::string& vizier_name() const { return vizier_name_; } @@ -433,6 +435,7 @@ class AgentMetadataState : NotCopyable { std::string pod_name_; uint32_t asid_; uint32_t pid_; + uint64_t start_time_; AgentID agent_id_; sole::uuid vizier_id_; diff --git a/src/shared/metadata/standalone_state_manager.h b/src/shared/metadata/standalone_state_manager.h index 82cb16030ed..a353f470682 100644 --- a/src/shared/metadata/standalone_state_manager.h +++ b/src/shared/metadata/standalone_state_manager.h @@ -35,9 +35,9 @@ namespace md { */ class StandaloneAgentMetadataStateManager : public AgentMetadataStateManager { public: - StandaloneAgentMetadataStateManager(std::string_view hostname, uint32_t asid, uint32_t pid, + StandaloneAgentMetadataStateManager(std::string_view hostname, uint32_t asid, uint32_t pid, uint64_t start_time, sole::uuid agent_id, event::TimeSystem* time_system) { - agent_metadata_state_ = std::make_shared(hostname, asid, pid, agent_id, + agent_metadata_state_ = std::make_shared(hostname, asid, pid, 
start_time, agent_id, /*pod_name=*/"", sole::uuid(), "standalone_pem", "", time_system); } diff --git a/src/shared/metadata/state_manager.h b/src/shared/metadata/state_manager.h index 67dec26b962..68f73f5fa37 100644 --- a/src/shared/metadata/state_manager.h +++ b/src/shared/metadata/state_manager.h @@ -119,7 +119,7 @@ class AgentMetadataStateManagerImpl : public AgentMetadataStateManager { public: virtual ~AgentMetadataStateManagerImpl() = default; - AgentMetadataStateManagerImpl(std::string_view hostname, uint32_t asid, uint32_t pid, + AgentMetadataStateManagerImpl(std::string_view hostname, uint32_t asid, uint32_t pid, uint64_t start_time, std::string pod_name, sole::uuid agent_id, bool collects_data, const px::system::Config& config, AgentMetadataFilter* metadata_filter, sole::uuid vizier_id, @@ -128,7 +128,7 @@ class AgentMetadataStateManagerImpl : public AgentMetadataStateManager { : pod_name_(pod_name), collects_data_(collects_data), metadata_filter_(metadata_filter) { md_reader_ = std::make_unique(config); agent_metadata_state_ = - std::make_shared(hostname, asid, pid, agent_id, pod_name, vizier_id, + std::make_shared(hostname, asid, pid, start_time, agent_id, pod_name, vizier_id, vizier_name, vizier_namespace, time_system); } diff --git a/src/shared/schema/utils.cc b/src/shared/schema/utils.cc index c17e5fbffb3..fde9bc093b2 100644 --- a/src/shared/schema/utils.cc +++ b/src/shared/schema/utils.cc @@ -35,13 +35,19 @@ table_store::schema::Relation InfoClassProtoToRelation( RelationInfo ConvertInfoClassPBToRelationInfo( const stirling::stirlingpb::InfoClass& info_class_pb) { + auto schema = info_class_pb.schema(); + std::optional mutation_id; + if (schema.mutation_id() != "") { + mutation_id = schema.mutation_id(); + } if (info_class_pb.schema().tabletized()) { - return RelationInfo(info_class_pb.schema().name(), info_class_pb.id(), - info_class_pb.schema().desc(), info_class_pb.schema().tabletization_key(), + return RelationInfo(schema.name(), 
info_class_pb.id(), schema.desc(), + schema.tabletization_key(), mutation_id, InfoClassProtoToRelation(info_class_pb)); } return RelationInfo(info_class_pb.schema().name(), info_class_pb.id(), - info_class_pb.schema().desc(), InfoClassProtoToRelation(info_class_pb)); + info_class_pb.schema().desc(), mutation_id, + InfoClassProtoToRelation(info_class_pb)); } } // namespace diff --git a/src/shared/schema/utils.h b/src/shared/schema/utils.h index 991edda5340..0b586f8d34c 100644 --- a/src/shared/schema/utils.h +++ b/src/shared/schema/utils.h @@ -32,20 +32,22 @@ namespace px { struct RelationInfo { RelationInfo() = default; RelationInfo(std::string name, uint64_t id, std::string desc, - table_store::schema::Relation relation) + std::optional mutation_id, table_store::schema::Relation relation) : name(std::move(name)), id(id), desc(std::move(desc)), tabletized(false), + mutation_id(mutation_id), relation(std::move(relation)) {} RelationInfo(std::string name, uint64_t id, std::string desc, uint64_t tabletization_key_idx, - table_store::schema::Relation relation) + std::optional mutation_id, table_store::schema::Relation relation) : name(std::move(name)), id(id), desc(std::move(desc)), tabletized(true), tabletization_key_idx(tabletization_key_idx), + mutation_id(mutation_id), relation(std::move(relation)) {} std::string name; @@ -53,6 +55,7 @@ struct RelationInfo { std::string desc; bool tabletized; uint64_t tabletization_key_idx; + std::optional mutation_id; table_store::schema::Relation relation; }; diff --git a/src/stirling/BUILD.bazel b/src/stirling/BUILD.bazel index 12b399e3218..ec3e9a759e4 100644 --- a/src/stirling/BUILD.bazel +++ b/src/stirling/BUILD.bazel @@ -49,6 +49,7 @@ pl_cc_library( "//src/stirling/proto:stirling_pl_cc_proto", "//src/stirling/source_connectors/dynamic_bpftrace:cc_library", "//src/stirling/source_connectors/dynamic_tracer:cc_library", + "//src/stirling/source_connectors/file_source:cc_library", 
"//src/stirling/source_connectors/jvm_stats:cc_library", "//src/stirling/source_connectors/network_stats:cc_library", "//src/stirling/source_connectors/perf_profiler:cc_library", diff --git a/src/stirling/core/BUILD.bazel b/src/stirling/core/BUILD.bazel index ab795229aad..587f46b427c 100644 --- a/src/stirling/core/BUILD.bazel +++ b/src/stirling/core/BUILD.bazel @@ -32,6 +32,7 @@ pl_cc_library( "//src/stirling/source_connectors/cpu_stat_bpftrace:__pkg__", "//src/stirling/source_connectors/dynamic_bpftrace:__pkg__", "//src/stirling/source_connectors/dynamic_tracer:__pkg__", + "//src/stirling/source_connectors/file_source:__pkg__", "//src/stirling/source_connectors/jvm_stats:__pkg__", "//src/stirling/source_connectors/network_stats:__pkg__", "//src/stirling/source_connectors/perf_profiler:__pkg__", diff --git a/src/stirling/core/info_class_manager.cc b/src/stirling/core/info_class_manager.cc index 82483a8e180..19cb1fa91f7 100644 --- a/src/stirling/core/info_class_manager.cc +++ b/src/stirling/core/info_class_manager.cc @@ -32,8 +32,12 @@ void InfoClassManager::InitContext(ConnectorContext* ctx) { source_->InitContext stirlingpb::InfoClass InfoClassManager::ToProto() const { stirlingpb::InfoClass info_class_proto; - info_class_proto.mutable_schema()->CopyFrom(schema_.ToProto()); + auto schema = info_class_proto.mutable_schema(); + schema->CopyFrom(schema_.ToProto()); info_class_proto.set_id(id()); + if (mutation_id_.has_value()) { + schema->set_mutation_id(mutation_id_.value()); + } return info_class_proto; } diff --git a/src/stirling/core/info_class_manager.h b/src/stirling/core/info_class_manager.h index dc929a871d7..98a5cf05f9f 100644 --- a/src/stirling/core/info_class_manager.h +++ b/src/stirling/core/info_class_manager.h @@ -73,6 +73,13 @@ class InfoClassManager final : public NotCopyable { */ void SetSourceConnector(SourceConnector* source) { source_ = source; } + /** + * @brief Mutation ID connector connected to this Info Class if one exists + * + * @param 
source Pointer to source connector instance. + */ + void SetMutationId(std::optional mutation_id) { mutation_id_ = mutation_id; } + /** * Get the schema of the InfoClass. * @@ -128,6 +135,9 @@ class InfoClassManager final : public NotCopyable { // Pointer to the data table where the data is stored. std::unique_ptr data_table_; + + // The mutation ID of the info class manager if one exists. + std::optional mutation_id_; }; using InfoClassManagerVec = std::vector>; diff --git a/src/stirling/core/info_class_manager_test.cc b/src/stirling/core/info_class_manager_test.cc index c67f78e24fe..f8440c9b856 100644 --- a/src/stirling/core/info_class_manager_test.cc +++ b/src/stirling/core/info_class_manager_test.cc @@ -27,6 +27,7 @@ namespace stirling { using types::DataType; using types::PatternType; +// TODO(ddelnano): Add test regarding ToProto and SetMutationId. TEST(InfoClassInfoSchemaTest, infoclass_mgr_proto_getters_test) { InfoClassManager info_class_mgr(SeqGenConnector::kSeq0Table); auto source = SeqGenConnector::Create("sequences"); diff --git a/src/stirling/core/source_connector.cc b/src/stirling/core/source_connector.cc index a9566daea49..ae2373c8fbb 100644 --- a/src/stirling/core/source_connector.cc +++ b/src/stirling/core/source_connector.cc @@ -61,7 +61,7 @@ void SourceConnector::PushData(DataPushCallback agent_callback) { Status s = agent_callback( data_table->id(), record_batch.tablet_id, std::make_unique(std::move(record_batch.records))); - LOG_IF(DFATAL, !s.ok()) << absl::Substitute("Failed to push data. Message = $0", s.msg()); + LOG_IF(ERROR, !s.ok()) << absl::Substitute("Failed to push data. 
Message = $0", s.msg()); } } } diff --git a/src/stirling/proto/stirling.proto b/src/stirling/proto/stirling.proto index ab36ce6297c..e0d1b374c23 100644 --- a/src/stirling/proto/stirling.proto +++ b/src/stirling/proto/stirling.proto @@ -48,6 +48,7 @@ message TableSchema { repeated Element elements = 2; bool tabletized = 3; uint64 tabletization_key = 4; + string mutation_id = 6; } // InfoClass stores a set of Elements that share common timestamps (i.e., they are diff --git a/src/stirling/source_connectors/file_source/BUILD.bazel b/src/stirling/source_connectors/file_source/BUILD.bazel new file mode 100644 index 00000000000..11dbfdc1630 --- /dev/null +++ b/src/stirling/source_connectors/file_source/BUILD.bazel @@ -0,0 +1,60 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("//bazel:pl_build_system.bzl", "pl_cc_bpf_test", "pl_cc_library", "pl_cc_test") + +package(default_visibility = ["//src/stirling:__subpackages__"]) + +pl_cc_library( + name = "cc_library", + srcs = glob( + ["*.cc"], + exclude = [ + "**/*_test.cc", + ], + ), + hdrs = glob(["*.h"]), + deps = [ + "//src/stirling/core:cc_library", + "//src/stirling/utils:cc_library", + "@com_github_tencent_rapidjson//:rapidjson", + ], +) + +pl_cc_test( + name = "file_source_connector_test", + srcs = ["file_source_connector_test.cc"], + data = [ + "testdata/test.json", + "testdata/unsupported.json", + ], + deps = [ + ":cc_library", + ], +) + +pl_cc_test( + name = "stirling_fs_test", + srcs = ["stirling_fs_test.cc"], + data = [ + "testdata/test.json", + "testdata/unsupported.json", + ], + deps = [ + ":cc_library", + "//src/stirling:cc_library", + ], +) diff --git a/src/stirling/source_connectors/file_source/file_source_connector.cc b/src/stirling/source_connectors/file_source/file_source_connector.cc new file mode 100644 index 00000000000..112c472ce05 --- /dev/null +++ b/src/stirling/source_connectors/file_source/file_source_connector.cc @@ -0,0 +1,287 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/stirling/source_connectors/file_source/file_source_connector.h" + +#include +#include + +#include +#include + +using px::StatusOr; + +constexpr size_t kMaxStringBytes = std::numeric_limits::max(); + +namespace px { +namespace stirling { + +using px::utils::RapidJSONTypeToString; + +StatusOr DataElementsFromJSON(std::ifstream& f_stream) { + std::string line; + std::getline(f_stream, line); + + if (f_stream.eof()) { + return error::Internal("Failed to read file, hit EOF"); + } + + rapidjson::Document d; + rapidjson::ParseResult ok = d.Parse(line.c_str()); + if (!ok) { + return error::Internal("Failed to parse JSON: $0 $1", line, + rapidjson::GetParseError_En(ok.Code())); + } + auto elements = d.MemberCount() + 2; // Add additional columns for time_ + BackedDataElements data_elements(elements); + + data_elements.emplace_back("time_", "", types::DataType::TIME64NS); + // TODO(ddelnano): Make it configurable to request a UUID in PxL rather than creating it by default. 
+ data_elements.emplace_back("uuid", "", types::DataType::UINT128); + for (rapidjson::Value::ConstMemberIterator itr = d.MemberBegin(); itr != d.MemberEnd(); ++itr) { + auto name = itr->name.GetString(); + const auto& value = itr->value; + types::DataType col_type; + + if (value.IsInt()) { + col_type = types::DataType::INT64; + } else if (value.IsDouble()) { + col_type = types::DataType::FLOAT64; + } else if (value.IsString()) { + col_type = types::DataType::STRING; + } else if (value.IsBool()) { + col_type = types::DataType::BOOLEAN; + } else if (value.IsObject()) { + col_type = types::DataType::STRING; + } else if (value.IsArray()) { + col_type = types::DataType::STRING; + } else { + return error::Internal("Unable to parse JSON key '$0': unsupported type: $1", name, + RapidJSONTypeToString(itr->value.GetType())); + } + data_elements.emplace_back(name, "", col_type); + } + + return data_elements; +} + +StatusOr DataElementsFromCSV(std::ifstream& file_name) { + PX_UNUSED(file_name); + return BackedDataElements(0); +} + +StatusOr DataElementsForUnstructuredFile() { + BackedDataElements data_elements(3); + data_elements.emplace_back("time_", "", types::DataType::TIME64NS); + data_elements.emplace_back("uuid", "", types::DataType::UINT128); + data_elements.emplace_back("raw_line", "", types::DataType::STRING); + return data_elements; +} + +namespace { + +StatusOr> DataElementsFromFile( + const std::filesystem::path& file_name, bool allow_unstructured = true) { + auto f = std::ifstream(file_name.string()); + if (!f.is_open()) { + return error::Internal("Failed to open file: $0 with error=$1", file_name.string(), + strerror(errno)); + } + + // get the file extension of the file + auto extension = file_name.extension().string(); + BackedDataElements data_elements; + if (extension == ".csv") { + PX_ASSIGN_OR_RETURN(data_elements, DataElementsFromCSV(f)); + } else if (extension == ".json") { + PX_ASSIGN_OR_RETURN(data_elements, DataElementsFromJSON(f)); + } else { + if 
(allow_unstructured) { + LOG(WARNING) << absl::Substitute("Unsupported file type: $0, treating each line as a single column", extension); + PX_ASSIGN_OR_RETURN(data_elements, DataElementsForUnstructuredFile()); + } else { + // TODO(ddelnano): If file extension is blank this isn't a helpful error message. + return error::Internal("Unsupported file type: $0", extension); + } + } + + f.seekg(0, std::ios::beg); + return std::make_pair(std::move(data_elements), std::move(f)); +} + +} // namespace + +StatusOr> FileSourceConnector::Create( + std::string_view source_name, const std::filesystem::path file_name) { + auto host_path = px::system::Config::GetInstance().ToHostPath(file_name); + PX_ASSIGN_OR_RETURN(auto data_elements_and_file, DataElementsFromFile(host_path)); + auto& [data_elements, file] = data_elements_and_file; + + // Get just the filename and extension + auto name = host_path.filename().string(); + std::unique_ptr table_schema = + DynamicDataTableSchema::Create(name, "", std::move(data_elements)); + return std::unique_ptr(new FileSourceConnector( + source_name, std::move(host_path), std::move(file), std::move(table_schema))); +} + +FileSourceConnector::FileSourceConnector(std::string_view source_name, + const std::filesystem::path& file_name, std::ifstream file, + std::unique_ptr table_schema) + : SourceConnector(source_name, ArrayView(&table_schema->Get(), 1)), + name_(source_name), + file_name_(file_name), + file_(std::move(file)), + table_schema_(std::move(table_schema)), + transfer_specs_({ + {".json", {&FileSourceConnector::TransferDataFromJSON}}, + {".csv", {&FileSourceConnector::TransferDataFromCSV}}, + {"", {&FileSourceConnector::TransferDataFromUnstructuredFile}}, + {".log", {&FileSourceConnector::TransferDataFromUnstructuredFile}}, + }) {} + +Status FileSourceConnector::InitImpl() { + sampling_freq_mgr_.set_period(kSamplingPeriod); + push_freq_mgr_.set_period(kPushPeriod); + return Status::OK(); +} + +Status FileSourceConnector::StopImpl() { + 
file_.close(); + return Status::OK(); +} + +constexpr int kMaxLines = 1000; + +void FileSourceConnector::TransferDataFromJSON(DataTable::DynamicRecordBuilder* /*r*/, + uint64_t nanos, const std::string& line) { + rapidjson::Document d; + rapidjson::ParseResult ok = d.Parse(line.c_str()); + if (!ok) { + LOG(ERROR) << absl::Substitute("Failed to parse JSON: $0 $1", line, + rapidjson::GetParseError_En(ok.Code())); + return; + } + DataTable::DynamicRecordBuilder r(data_tables_[0]); + const auto& columns = table_schema_->Get().elements(); + + for (size_t col = 0; col < columns.size(); col++) { + const auto& column = columns[col]; + std::string key(column.name()); + // time_ is inserted by stirling and not within the polled file + if (key == "time_") { + r.Append(col, types::Time64NSValue(nanos)); + continue; + } else if (key == "uuid") { + sole::uuid u = sole::uuid4(); + r.Append(col, types::UInt128Value(u.ab, u.cd)); + continue; + } + const auto& value = d[key.c_str()]; + switch (column.type()) { + case types::DataType::INT64: + r.Append(col, types::Int64Value(value.GetInt())); + break; + case types::DataType::FLOAT64: + r.Append(col, types::Float64Value(value.GetDouble())); + break; + case types::DataType::STRING: + if (value.IsArray() || value.IsObject()) { + rapidjson::StringBuffer buffer; + rapidjson::Writer writer(buffer); + value.Accept(writer); + r.Append(col, types::StringValue(buffer.GetString()), kMaxStringBytes); + } else { + r.Append(col, types::StringValue(value.GetString()), kMaxStringBytes); + } + break; + case types::DataType::BOOLEAN: + r.Append(col, types::BoolValue(value.GetBool())); + break; + default: + LOG(FATAL) << absl::Substitute( + "Failed to insert field into DataTable: unsupported type '$0'", + types::DataType_Name(column.type())); + } + } + return; +} + +void FileSourceConnector::TransferDataFromUnstructuredFile(DataTable::DynamicRecordBuilder* /*r*/, + uint64_t nanos, const std::string& line) { + DataTable::DynamicRecordBuilder 
r(data_tables_[0]); + r.Append(0, types::Time64NSValue(nanos)); + sole::uuid u = sole::uuid4(); + r.Append(1, types::UInt128Value(u.ab, u.cd)); + r.Append(2, types::StringValue(line), kMaxStringBytes); + return; +} + +void FileSourceConnector::TransferDataFromCSV(DataTable::DynamicRecordBuilder* r, uint64_t nanos, + const std::string& line) { + PX_UNUSED(r); + PX_UNUSED(nanos); + PX_UNUSED(line); + return; +} + +void FileSourceConnector::TransferDataImpl(ConnectorContext* /* ctx */) { + DCHECK_EQ(data_tables_.size(), 1U) << "Only one table is allowed per FileSourceConnector."; + int i = 0; + auto extension = file_name_.extension().string(); + auto transfer_fn = transfer_specs_.at(extension).transfer_fn; + + auto now = std::chrono::system_clock::now(); + auto duration = now.time_since_epoch(); + uint64_t nanos = std::chrono::duration_cast(duration).count(); + auto before_pos = file_.tellg(); + while (i < kMaxLines) { + std::string line; + std::getline(file_, line); + + if (file_.eof() || line.empty()) { + file_.clear(); + auto after_pos = file_.tellg(); + if (after_pos == last_pos_) { + LOG_EVERY_N(INFO, 100) << absl::Substitute("Reached EOF for file=$0 eof count=$1 pos=", + file_name_.string(), eof_count_) + << after_pos; + eof_count_++; + + // TODO(ddelnano): Using a file's inode is a better way to detect file rotation. For now, + // this will suffice. 
+ std::ifstream s(file_name_, std::ios::ate | std::ios::binary); + if (s.tellg() < after_pos) { + LOG(INFO) << "Detected file rotation, resetting file position"; + file_.close(); + file_.open(file_name_, std::ios::in); + } + } + break; + } + + transfer_fn(*this, nullptr, nanos, line); + i++; + } + auto after_pos = file_.tellg(); + last_pos_ = after_pos; + monitor_.AppendStreamStatusRecord(file_name_, after_pos - before_pos, ""); +} + +} // namespace stirling +} // namespace px diff --git a/src/stirling/source_connectors/file_source/file_source_connector.h b/src/stirling/source_connectors/file_source/file_source_connector.h new file mode 100644 index 00000000000..1525327a652 --- /dev/null +++ b/src/stirling/source_connectors/file_source/file_source_connector.h @@ -0,0 +1,87 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include +#include +#include + +#include "src/stirling/core/source_connector.h" +#include "src/stirling/utils/monitor.h" + +namespace px { +namespace stirling { + +class FileSourceConnector : public SourceConnector { + using pos_type = std::ifstream::pos_type; + + public: + static constexpr auto kSamplingPeriod = std::chrono::milliseconds{100}; + // Set this high enough to avoid the following error: + // F20250129 00:05:30.980778 2527479 source_connector.cc:64] Failed to push data. 
Message = + // Table_id 1 doesn't exist. + // + // This occurs when the Stirling data table has data but the table store hasn't received its + // schema yet. I'm not sure why the dynamic tracer doesn't experience this case. + static constexpr auto kPushPeriod = std::chrono::milliseconds{7000}; + + static StatusOr > Create(std::string_view source_name, + const std::filesystem::path file_name); + + FileSourceConnector() = delete; + ~FileSourceConnector() override = default; + + protected: + explicit FileSourceConnector(std::string_view source_name, const std::filesystem::path& file_name, + std::ifstream file, + std::unique_ptr table_schema); + Status InitImpl() override; + Status StopImpl() override; + void TransferDataImpl(ConnectorContext* ctx) override; + + private: + void TransferDataFromUnstructuredFile(DataTable::DynamicRecordBuilder* builder, uint64_t nanos, + const std::string& line); + void TransferDataFromJSON(DataTable::DynamicRecordBuilder* builder, uint64_t nanos, + const std::string& line); + void TransferDataFromCSV(DataTable::DynamicRecordBuilder* builder, uint64_t nanos, + const std::string& line); + + struct FileTransferSpec { + std::function + transfer_fn; + }; + std::string name_; + const std::filesystem::path file_name_; + std::ifstream file_; + std::unique_ptr table_schema_; + absl::flat_hash_map transfer_specs_; + int eof_count_ = 0; + pos_type last_pos_ = 0; + StirlingMonitor& monitor_ = *StirlingMonitor::GetInstance(); +}; + +StatusOr DataElementsFromJSON(std::ifstream& f_stream); +StatusOr DataElementsFromCSV(std::ifstream& f_stream); +StatusOr DataElementsForUnstructuredFile(); + +} // namespace stirling +} // namespace px diff --git a/src/stirling/source_connectors/file_source/file_source_connector_test.cc b/src/stirling/source_connectors/file_source/file_source_connector_test.cc new file mode 100644 index 00000000000..4b5dba3c6b2 --- /dev/null +++ b/src/stirling/source_connectors/file_source/file_source_connector_test.cc @@ -0,0 +1,82 @@ 
+/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "src/common/testing/testing.h" +#include "src/stirling/source_connectors/file_source/file_source_connector.h" + +namespace px { +namespace stirling { + +TEST(FileSourceConnectorTest, DataElementsFromJSON) { + const auto file_path = + testing::BazelRunfilePath("src/stirling/source_connectors/file_source/testdata/test.json"); + auto stream = std::ifstream(file_path); + auto result = DataElementsFromJSON(stream); + ASSERT_OK(result); + BackedDataElements elements = std::move(result.ValueOrDie()); + + ASSERT_EQ(elements.elements().size(), 8); + EXPECT_EQ(elements.elements()[0].name(), "time_"); + EXPECT_EQ(elements.elements()[0].type(), types::DataType::TIME64NS); + EXPECT_EQ(elements.elements()[1].name(), "uuid"); + EXPECT_EQ(elements.elements()[1].type(), types::DataType::UINT128); + EXPECT_EQ(elements.elements()[2].name(), "id"); + EXPECT_EQ(elements.elements()[2].type(), types::DataType::INT64); + EXPECT_EQ(elements.elements()[3].name(), "active"); + EXPECT_EQ(elements.elements()[3].type(), types::DataType::BOOLEAN); + EXPECT_EQ(elements.elements()[4].name(), "score"); + EXPECT_EQ(elements.elements()[4].type(), types::DataType::FLOAT64); + EXPECT_EQ(elements.elements()[5].name(), "name"); + EXPECT_EQ(elements.elements()[5].type(), types::DataType::STRING); + 
EXPECT_EQ(elements.elements()[6].name(), "object"); + EXPECT_EQ(elements.elements()[6].type(), types::DataType::STRING); + EXPECT_EQ(elements.elements()[7].name(), "arr"); + EXPECT_EQ(elements.elements()[7].type(), types::DataType::STRING); +} + +TEST(FileSourceConnectorTest, DISABLED_DataElementsFromJSON_UnsupportedTypes) { + const auto file_path = testing::BazelRunfilePath( + "src/stirling/source_connectors/file_source/testdata/unsupported.json"); + auto stream = std::ifstream(file_path); + auto result = DataElementsFromJSON(stream); + ASSERT_EQ(result.ok(), false); + ASSERT_EQ(result.status().msg(), + "Unable to parse JSON key 'unsupported': unsupported type: Object"); +} + +TEST(FileSourceConnectorTest, DataElementsForUnstructuredFile) { + + const auto file_path = testing::BazelRunfilePath( + "src/stirling/source_connectors/file_source/testdata/kern.log"); + auto stream = std::ifstream(file_path); + auto result = DataElementsForUnstructuredFile(); + ASSERT_OK(result); + BackedDataElements elements = std::move(result.ValueOrDie()); + EXPECT_EQ(elements.elements()[0].name(), "time_"); + EXPECT_EQ(elements.elements()[0].type(), types::DataType::TIME64NS); + EXPECT_EQ(elements.elements()[1].name(), "uuid"); + EXPECT_EQ(elements.elements()[1].type(), types::DataType::UINT128); + EXPECT_EQ(elements.elements()[2].name(), "raw_line"); + EXPECT_EQ(elements.elements()[2].type(), types::DataType::STRING); +} + +} // namespace stirling +} // namespace px diff --git a/src/stirling/source_connectors/file_source/stirling_fs_test.cc b/src/stirling/source_connectors/file_source/stirling_fs_test.cc new file mode 100644 index 00000000000..6ce799e7326 --- /dev/null +++ b/src/stirling/source_connectors/file_source/stirling_fs_test.cc @@ -0,0 +1,225 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include + +#include "src/common/base/base.h" +#include "src/common/testing/testing.h" +#include "src/stirling/core/source_registry.h" +#include "src/stirling/core/types.h" +#include "src/stirling/stirling.h" + +namespace px { +namespace stirling { + +using ::px::testing::BazelRunfilePath; +using ::testing::SizeIs; +using ::testing::StrEq; + +//----------------------------------------------------------------------------- +// Test fixture and shared code +//----------------------------------------------------------------------------- + +class StirlingFileSourceTest : public ::testing::Test { + protected: + void SetUp() override { + std::unique_ptr registry = std::make_unique(); + stirling_ = Stirling::Create(std::move(registry)); + + // Set function to call on data pushes. 
+ stirling_->RegisterDataPushCallback( + absl::bind_front(&StirlingFileSourceTest::AppendData, this)); + } + + Status AppendData(uint64_t /*table_id*/, types::TabletID /*tablet_id*/, + std::unique_ptr record_batch) { + record_batches_.push_back(std::move(record_batch)); + return Status::OK(); + } + + StatusOr WaitForStatus(sole::uuid trace_id) { + StatusOr s; + do { + s = stirling_->GetFileSourceInfo(trace_id); + std::this_thread::sleep_for(std::chrono::seconds(1)); + } while (!s.ok() && s.code() == px::statuspb::Code::RESOURCE_UNAVAILABLE); + + return s; + } + + std::optional FindFieldIndex(const stirlingpb::TableSchema& schema, + std::string_view field_name) { + int idx = 0; + for (const auto& e : schema.elements()) { + if (e.name() == field_name) { + return idx; + } + ++idx; + } + return {}; + } + + void DeployFileSource(std::string file_name, bool trigger_stop = true) { + sole::uuid id = sole::uuid4(); + stirling_->RegisterFileSource(id, file_name); + + // Should deploy. + stirlingpb::Publish publication; + ASSERT_OK_AND_ASSIGN(publication, WaitForStatus(id)); + + // Check the incremental publication change. + ASSERT_EQ(publication.published_info_classes_size(), 1); + info_class_ = publication.published_info_classes(0); + + // Run Stirling data collector. + ASSERT_OK(stirling_->RunAsThread()); + + // Wait to capture some data. + while (record_batches_.empty()) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + + if (trigger_stop) { + ASSERT_OK(stirling_->RemoveFileSource(id)); + + // Should get removed. 
+ EXPECT_EQ(WaitForStatus(id).code(), px::statuspb::Code::NOT_FOUND); + + stirling_->Stop(); + } + } + + std::unique_ptr stirling_; + std::vector> record_batches_; + stirlingpb::InfoClass info_class_; +}; + +class FileSourceJSONTest : public StirlingFileSourceTest { + protected: + const std::string kFilePath = + BazelRunfilePath("src/stirling/source_connectors/file_source/testdata/test.json"); +}; + +TEST_F(FileSourceJSONTest, ParsesJSONFile) { + DeployFileSource(kFilePath); + EXPECT_THAT(record_batches_, SizeIs(1)); + auto& rb = record_batches_[0]; + // Expect there to be 8 columns: time_, uuid, and the 6 cols from the JSON file. + EXPECT_EQ(rb->size(), 8); + + for (size_t i = 0; i < rb->size(); ++i) { + auto col_wrapper = rb->at(i); + // The JSON file has 10 lines. + EXPECT_EQ(col_wrapper->Size(), 10); + } +} + +TEST_F(FileSourceJSONTest, ContinuesReadingAfterEOFReached) { + std::string file_name = "./test.json"; + std::ofstream ofs(file_name, std::ios::app); + if (!ofs) { + LOG(FATAL) << absl::Substitute("Failed to open file= $0 received error=$1", kFilePath, strerror(errno)); + } + // FileSourceConnector parses the first line to infer the file's schema, an empty file will cause an error. + ofs << R"({"id": 0, "active": false, "score": 6.28, "name": "item0", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; + + DeployFileSource(file_name, false); + EXPECT_THAT(record_batches_, SizeIs(1)); + auto& rb = record_batches_[0]; + // Expect there to be 8 columns: time_, uuid, and the 6 cols from the JSON file. 
+ EXPECT_EQ(rb->size(), 8); + + for (size_t i = 0; i < rb->size(); ++i) { + auto col_wrapper = rb->at(i); + // TODO(ddelnano): Clean up these log messages and add better assertions for uint128 case + if (i == 1) { + LOG(INFO) << col_wrapper->Get(0).val; + LOG(INFO) << col_wrapper->Get(1).val; + } else if (i == 6) { + LOG(INFO) << col_wrapper->Get(0); + EXPECT_EQ(col_wrapper->Get(0), R"({"a":1,"b":2})"); + } else if (i == 7) { + LOG(INFO) << col_wrapper->Get(0); + EXPECT_EQ(col_wrapper->Get(0), R"([0,1,2])"); + } + // The file's first row batch has 1 line + EXPECT_EQ(col_wrapper->Size(), 1); + } + + ofs << R"({"id": 1, "active": false, "score": 6.28, "name": "item1", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; + ofs.flush(); + ofs.close(); + + while (record_batches_.size() < 2) { + std::this_thread::sleep_for(std::chrono::seconds(3)); + LOG(INFO) << "Waiting for more data..."; + } + + auto& rb2 = record_batches_[1]; + for (size_t i = 0; i < rb2->size(); ++i) { + auto col_wrapper = rb2->at(i); + // The file's second row batch has 1 line + EXPECT_EQ(col_wrapper->Size(), 1); + } +} + +TEST_F(FileSourceJSONTest, ContinuesReadingAfterFileRotation) { + std::string file_name = "./test2.json"; + std::ofstream ofs(file_name, std::ios::app); + if (!ofs) { + LOG(FATAL) << absl::Substitute("Failed to open file= $0 received error=$1", kFilePath, strerror(errno)); + } + // FileSourceConnector parses the first line to infer the file's schema, an empty file will cause an error. + ofs << R"({"id": 0, "active": false, "score": 6.28, "name": "item0", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; + ofs << R"({"id": 1, "active": false, "score": 6.28, "name": "item1", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; + + DeployFileSource(file_name, false); + EXPECT_THAT(record_batches_, SizeIs(1)); + auto& rb = record_batches_[0]; + // Expect there to be 8 columns: time_, uuid, and the 6 cols from the JSON file. 
+ EXPECT_EQ(rb->size(), 8); + + for (size_t i = 0; i < rb->size(); ++i) { + auto col_wrapper = rb->at(i); + // The file's first row batch has 2 lines + EXPECT_EQ(col_wrapper->Size(), 2); + } + + std::ofstream ofs2(file_name, std::ios::trunc); + ofs2 << R"({"id": 2, "active": false, "score": 6.28, "name": "item2", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; + ofs2.flush(); + ofs.close(); + + while (record_batches_.size() < 2) { + std::this_thread::sleep_for(std::chrono::seconds(3)); + LOG(INFO) << "Waiting for more data..."; + } + + auto& rb2 = record_batches_[1]; + for (size_t i = 0; i < rb2->size(); ++i) { + auto col_wrapper = rb2->at(i); + // The file's second row batch has 1 line + EXPECT_EQ(col_wrapper->Size(), 1); + } +} + +} // namespace stirling +} // namespace px diff --git a/src/stirling/source_connectors/file_source/testdata/kern.log b/src/stirling/source_connectors/file_source/testdata/kern.log new file mode 100644 index 00000000000..fed434d43a4 --- /dev/null +++ b/src/stirling/source_connectors/file_source/testdata/kern.log @@ -0,0 +1,5 @@ +2025-03-05T22:30:12.313406+00:00 dev-vm kernel: ll header: 00000000: ff ff ff ff ff ff 42 01 0a 81 00 01 08 06 +2025-03-05T22:30:18.313309+00:00 dev-vm kernel: IPv4: martian source 10.129.0.8 from 10.129.0.1, on dev ens4 +2025-03-05T22:30:18.313333+00:00 dev-vm kernel: ll header: 00000000: ff ff ff ff ff ff 42 01 0a 81 00 01 08 06 +2025-03-05T22:30:24.313240+00:00 dev-vm kernel: IPv4: martian source 10.129.0.8 from 10.129.0.1, on dev ens4 +2025-03-05T22:30:24.313268+00:00 dev-vm kernel: ll header: 00000000: ff ff ff ff ff ff 42 01 0a 81 00 01 08 06 diff --git a/src/stirling/source_connectors/file_source/testdata/test.json b/src/stirling/source_connectors/file_source/testdata/test.json new file mode 100644 index 00000000000..f65c3fabafb --- /dev/null +++ b/src/stirling/source_connectors/file_source/testdata/test.json @@ -0,0 +1,10 @@ +{"id": 1, "active": true, "score": 3.14, "name": "item1", 
"object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 2, "active": false, "score": 2.71, "name": "item2", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 3, "active": true, "score": 1.41, "name": "item3", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 4, "active": false, "score": 1.73, "name": "item4", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 5, "active": true, "score": 0.99, "name": "item5", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 6, "active": false, "score": 2.18, "name": "item6", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 7, "active": true, "score": 3.67, "name": "item7", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 8, "active": false, "score": 4.56, "name": "item8", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 9, "active": true, "score": 5.32, "name": "item9", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} +{"id": 10, "active": false, "score": 6.28, "name": "item10", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} diff --git a/src/stirling/source_connectors/file_source/testdata/unsupported.json b/src/stirling/source_connectors/file_source/testdata/unsupported.json new file mode 100644 index 00000000000..455064ea679 --- /dev/null +++ b/src/stirling/source_connectors/file_source/testdata/unsupported.json @@ -0,0 +1 @@ +{"id": 1, "active": true, "score": 3.14, "name": "item1", "unsupported": {"a": 1, "b": 2}} diff --git a/src/stirling/source_connectors/stirling_error/BUILD.bazel b/src/stirling/source_connectors/stirling_error/BUILD.bazel index 15f25dc41af..c0c843d88ca 100644 --- a/src/stirling/source_connectors/stirling_error/BUILD.bazel +++ b/src/stirling/source_connectors/stirling_error/BUILD.bazel @@ -17,7 +17,7 @@ load("//bazel:pl_build_system.bzl", "pl_cc_bpf_test", "pl_cc_library") load("//src/stirling/source_connectors/perf_profiler/testing:testing.bzl", "agent_libs", "px_jattach", "stirling_profiler_java_args") -package(default_visibility = ["//src/stirling:__subpackages__"]) 
+package(default_visibility = ["//src/stirling:__subpackages__", "//src/vizier/services/agent/shared/manager:__subpackages__"]) pl_cc_library( name = "cc_library", @@ -42,6 +42,7 @@ pl_cc_bpf_test( args = stirling_profiler_java_args, data = agent_libs + [ px_jattach, + "testdata/test.json", "//src/stirling/source_connectors/perf_profiler/testing/java:java_image_base-java-profiler-test-image-omit-frame-pointer.tar", ], flaky = True, diff --git a/src/stirling/source_connectors/stirling_error/sink_results_table.h b/src/stirling/source_connectors/stirling_error/sink_results_table.h new file mode 100644 index 00000000000..d2f15bbfa57 --- /dev/null +++ b/src/stirling/source_connectors/stirling_error/sink_results_table.h @@ -0,0 +1,51 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "src/common/base/base.h" +#include "src/stirling/core/canonical_types.h" +#include "src/stirling/core/output.h" +#include "src/stirling/core/source_connector.h" + +namespace px { +namespace stirling { + +// clang-format off +constexpr DataElement kSinkResultsElements[] = { + canonical_data_elements::kTime, + canonical_data_elements::kUPID, + {"bytes_transferred", "", + types::DataType::INT64, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, + {"destination", "The planpb::OperatorType enum of the sink", + types::DataType::INT64, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, + {"stream_id", "The ID of the stream of interest.", + types::DataType::STRING, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, +}; + +constexpr DataTableSchema kSinkResultsTable { + "sink_results", + "This table contains the sink node results during execution.", + kSinkResultsElements +}; + +// clang-format on +DEFINE_PRINT_TABLE(SinkResults); + +} // namespace stirling +} // namespace px diff --git a/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc b/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc index 7eb9f8a910c..df3b567982b 100644 --- a/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc +++ b/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc @@ -106,6 +106,23 @@ std::vector ToProbeRecordVector( return result; } +std::vector ToStreamRecordVector( + const std::vector>& record_batches) { + std::vector result; + + for (size_t rb_idx = 0; rb_idx < record_batches.size(); ++rb_idx) { + auto& rb = *record_batches[rb_idx]; + for (size_t idx = 0; idx < rb.front()->Size(); ++idx) { + StreamStatusRecord r; + r.stream_id = rb[2]->Get(idx).string(); + r.bytes_sent = rb[3]->Get(idx).val; + r.info = rb[4]->Get(idx).string(); + result.push_back(r); + } + } + return result; +} + // A SourceConnector that 
fails on Init. class FaultyConnector : public SourceConnector { public: @@ -195,6 +212,25 @@ class StirlingErrorTest : public ::testing::Test { return trace_id; } + StatusOr DeployFileSource(const std::string& program_text) { + // Compile file source. + PX_ASSIGN_OR_RETURN(auto compiled_file_source, + px::carnot::planner::compiler::CompileFileSource(program_text)); + + // Register tracepoint. + sole::uuid id = sole::uuid4(); + stirling_->RegisterFileSource(id, std::move(compiled_file_source.glob_pattern())); + + // Wait for deployment to finish. + StatusOr s; + do { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + s = stirling_->GetFileSourceInfo(id); + } while (!s.ok() && s.code() == px::statuspb::Code::RESOURCE_UNAVAILABLE); + + return id; + } + Status AppendData(uint64_t table_id, types::TabletID tablet_id, std::unique_ptr record_batch) { PX_UNUSED(tablet_id); @@ -208,6 +244,8 @@ class StirlingErrorTest : public ::testing::Test { source_status_batches_.push_back(std::move(record_batch)); } else if (table_name == "probe_status") { probe_status_batches_.push_back(std::move(record_batch)); + } else if (table_name == "stream_status") { + stream_status_batches_.push_back(std::move(record_batch)); } } return Status::OK(); @@ -221,6 +259,9 @@ class StirlingErrorTest : public ::testing::Test { } else if constexpr (std::is_same_v) { return WaitAndExpectRecords([&]() { return ToProbeRecordVector(probe_status_batches_); }, expected); + } else if constexpr (std::is_same_v) { + return WaitAndExpectRecords([&]() { return ToStreamRecordVector(stream_status_batches_); }, + expected); } else { static_assert(always_false); } @@ -230,6 +271,7 @@ class StirlingErrorTest : public ::testing::Test { std::unique_ptr stirling_; std::vector> source_status_batches_; std::vector> probe_status_batches_; + std::vector> stream_status_batches_; }; TEST_F(StirlingErrorTest, SourceConnectorInitOK) { @@ -527,5 +569,55 @@ TEST_F(StirlingErrorTest, 
PerfProfilerNoPreserveFramePointer) { EXPECT_THAT(probe_records, IsEmpty()); } +// Deploy a FileSource stream and record the progress of the stream throughput. +// Expects one message for each TransferDataImpl call to the FileSource. +TEST_F(StirlingErrorTest, StreamStatusThroughput) { + // Register StirlingErrorConnector. + std::unique_ptr registry = std::make_unique(); + registry->RegisterOrDie("stirling_error"); + + // Run Stirling. + InitStirling(std::move(registry)); + ASSERT_OK(stirling_->RunAsThread()); + ASSERT_OK(stirling_->WaitUntilRunning(std::chrono::seconds(5))); + + auto file_stream_pxl = R"( +import pxlog +glob_pattern = '$0' +table_name = '$1' +pxlog.FileSource(glob_pattern, table_name, "1m") +)"; + + const auto glob_pattern = + BazelRunfilePath("src/stirling/source_connectors/stirling_error/testdata/test.json").string(); + const auto table_name = "test.json"; + + ASSERT_OK_AND_ASSIGN( + auto id, DeployFileSource(absl::Substitute(file_stream_pxl, glob_pattern, table_name))); + + // Stirling Error Source Connector Initialization. + WaitAndExpectStatusRecords(std::vector{ + {.source_connector = "stirling_error", + .status = px::statuspb::Code::OK, + .error = "", + .context = "Init"}, + }); + // Tracepoint deployed. + WaitAndExpectStatusRecords( + std::vector{{.stream_id = glob_pattern, .bytes_sent = 587, .info = ""}}); + + // Remove file source; + ASSERT_OK(stirling_->RemoveFileSource(id)); + StatusOr s; + do { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + s = stirling_->GetFileSourceInfo(id); + } while (s.ok()); + + // TODO(ddelnano): Add file source removal message assertion. 
+ + stirling_->Stop(); +} + } // namespace stirling } // namespace px diff --git a/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc b/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc index e186c53b60b..1be86eb2794 100644 --- a/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc +++ b/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc @@ -39,7 +39,7 @@ Status StirlingErrorConnector::InitImpl() { Status StirlingErrorConnector::StopImpl() { return Status::OK(); } void StirlingErrorConnector::TransferDataImpl(ConnectorContext* ctx) { - DCHECK_EQ(data_tables_.size(), 2U) << "StirlingErrorConnector has two data tables."; + DCHECK_EQ(data_tables_.size(), 4U) << "StirlingErrorConnector has four data tables."; if (data_tables_[kStirlingErrorTableNum] != nullptr) { TransferStirlingErrorTable(ctx, data_tables_[kStirlingErrorTableNum]); @@ -48,6 +48,10 @@ void StirlingErrorConnector::TransferDataImpl(ConnectorContext* ctx) { if (data_tables_[kProbeStatusTableNum] != nullptr) { TransferProbeStatusTable(ctx, data_tables_[kProbeStatusTableNum]); } + + if (data_tables_[kStreamStatusTableNum] != nullptr) { + TransferStreamStatusTable(ctx, data_tables_[kStreamStatusTableNum]); + } } void StirlingErrorConnector::TransferStirlingErrorTable(ConnectorContext* ctx, @@ -79,5 +83,18 @@ void StirlingErrorConnector::TransferProbeStatusTable(ConnectorContext* ctx, } } +void StirlingErrorConnector::TransferStreamStatusTable(ConnectorContext* ctx, + DataTable* data_table) { + md::UPID upid = md::UPID(ctx->GetASID(), pid_, start_time_); + for (auto& record : monitor_.ConsumeStreamStatusRecords()) { + DataTable::RecordBuilder<&kStreamStatusTable> r(data_table, record.timestamp_ns); + r.Append(static_cast(record.timestamp_ns)); + r.Append(upid.value()); + r.Append(std::move(record.stream_id)); + r.Append(static_cast(record.bytes_sent)); + r.Append(std::move(record.info)); + } +} + } // namespace stirling 
} // namespace px diff --git a/src/stirling/source_connectors/stirling_error/stirling_error_connector.h b/src/stirling/source_connectors/stirling_error/stirling_error_connector.h index 0dae755c947..21db2a7c7f6 100644 --- a/src/stirling/source_connectors/stirling_error/stirling_error_connector.h +++ b/src/stirling/source_connectors/stirling_error/stirling_error_connector.h @@ -26,7 +26,9 @@ #include "src/common/base/base.h" #include "src/stirling/core/source_connector.h" #include "src/stirling/source_connectors/stirling_error/probe_status_table.h" +#include "src/stirling/source_connectors/stirling_error/sink_results_table.h" #include "src/stirling/source_connectors/stirling_error/stirling_error_table.h" +#include "src/stirling/source_connectors/stirling_error/stream_status_table.h" #include "src/stirling/utils/monitor.h" namespace px { @@ -37,9 +39,11 @@ class StirlingErrorConnector : public SourceConnector { static constexpr std::string_view kName = "stirling_error"; static constexpr auto kSamplingPeriod = std::chrono::milliseconds{1000}; static constexpr auto kPushPeriod = std::chrono::milliseconds{1000}; - static constexpr auto kTables = MakeArray(kStirlingErrorTable, kProbeStatusTable); + static constexpr auto kTables = + MakeArray(kStirlingErrorTable, kProbeStatusTable, kStreamStatusTable, kSinkResultsTable); static constexpr uint32_t kStirlingErrorTableNum = TableNum(kTables, kStirlingErrorTable); static constexpr uint32_t kProbeStatusTableNum = TableNum(kTables, kProbeStatusTable); + static constexpr uint32_t kStreamStatusTableNum = TableNum(kTables, kStreamStatusTable); StirlingErrorConnector() = delete; ~StirlingErrorConnector() override = default; @@ -59,6 +63,7 @@ class StirlingErrorConnector : public SourceConnector { void TransferStirlingErrorTable(ConnectorContext* ctx, DataTable* data_table); void TransferProbeStatusTable(ConnectorContext* ctx, DataTable* data_table); + void TransferStreamStatusTable(ConnectorContext* ctx, DataTable* data_table); 
StirlingMonitor& monitor_ = *StirlingMonitor::GetInstance(); int32_t pid_ = -1; diff --git a/src/stirling/source_connectors/stirling_error/stream_status_table.h b/src/stirling/source_connectors/stirling_error/stream_status_table.h new file mode 100644 index 00000000000..160694cbcad --- /dev/null +++ b/src/stirling/source_connectors/stirling_error/stream_status_table.h @@ -0,0 +1,51 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include "src/common/base/base.h" +#include "src/stirling/core/canonical_types.h" +#include "src/stirling/core/output.h" +#include "src/stirling/core/source_connector.h" + +namespace px { +namespace stirling { + +// clang-format off +constexpr DataElement kStreamStatusElements[] = { + canonical_data_elements::kTime, + canonical_data_elements::kUPID, + {"stream_id", "The ID of the stream of interest. 
For file source connector this is glob_pattern", + types::DataType::STRING, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, + {"bytes_sent", "The error messages of the deployment or event, if any", + types::DataType::INT64, types::SemanticType::ST_BYTES, types::PatternType::METRIC_COUNTER}, + {"info", "Optional extra info provided as a JSON", + types::DataType::STRING, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, +}; + +constexpr DataTableSchema kStreamStatusTable { + "stream_status", + "This table contains the status of streams Stirling is ingested across various source connectors", + kStreamStatusElements +}; + +// clang-format on +DEFINE_PRINT_TABLE(StreamStatus); + +} // namespace stirling +} // namespace px diff --git a/src/stirling/source_connectors/stirling_error/testdata/test.json b/src/stirling/source_connectors/stirling_error/testdata/test.json new file mode 100644 index 00000000000..96b30cbd35c --- /dev/null +++ b/src/stirling/source_connectors/stirling_error/testdata/test.json @@ -0,0 +1,10 @@ +{"id": 1, "active": true, "score": 3.14, "name": "item1"} +{"id": 2, "active": false, "score": 2.71, "name": "item2"} +{"id": 3, "active": true, "score": 1.41, "name": "item3"} +{"id": 4, "active": false, "score": 1.73, "name": "item4"} +{"id": 5, "active": true, "score": 0.99, "name": "item5"} +{"id": 6, "active": false, "score": 2.18, "name": "item6"} +{"id": 7, "active": true, "score": 3.67, "name": "item7"} +{"id": 8, "active": false, "score": 4.56, "name": "item8"} +{"id": 9, "active": true, "score": 5.32, "name": "item9"} +{"id": 10, "active": false, "score": 6.28, "name": "item10"} diff --git a/src/stirling/source_connectors/stirling_error/testdata/unsupported.json b/src/stirling/source_connectors/stirling_error/testdata/unsupported.json new file mode 100644 index 00000000000..455064ea679 --- /dev/null +++ b/src/stirling/source_connectors/stirling_error/testdata/unsupported.json @@ -0,0 +1 @@ +{"id": 1, "active": true, "score": 
3.14, "name": "item1", "unsupported": {"a": 1, "b": 2}} diff --git a/src/stirling/stirling.cc b/src/stirling/stirling.cc index 5b15a5ecabd..fd35286854e 100644 --- a/src/stirling/stirling.cc +++ b/src/stirling/stirling.cc @@ -46,6 +46,7 @@ #include "src/stirling/source_connectors/dynamic_bpftrace/dynamic_bpftrace_connector.h" #include "src/stirling/source_connectors/dynamic_bpftrace/utils.h" #include "src/stirling/source_connectors/dynamic_tracer/dynamic_trace_connector.h" +#include "src/stirling/source_connectors/file_source/file_source_connector.h" #include "src/stirling/source_connectors/jvm_stats/jvm_stats_connector.h" #include "src/stirling/source_connectors/network_stats/network_stats_connector.h" #include "src/stirling/source_connectors/perf_profiler/perf_profile_connector.h" @@ -200,8 +201,11 @@ class StirlingImpl final : public Stirling { void RegisterTracepoint( sole::uuid uuid, std::unique_ptr program) override; + void RegisterFileSource(sole::uuid id, std::string file_name) override; StatusOr GetTracepointInfo(sole::uuid trace_id) override; + StatusOr GetFileSourceInfo(sole::uuid trace_id) override; Status RemoveTracepoint(sole::uuid trace_id) override; + Status RemoveFileSource(sole::uuid trace_id) override; void GetPublishProto(stirlingpb::Publish* publish_pb) override; void RegisterDataPushCallback(DataPushCallback f) override { data_push_callback_ = f; } void RegisterAgentMetadataCallback(AgentMetadataCallback f) override { @@ -224,9 +228,12 @@ class StirlingImpl final : public Stirling { void UpdateDynamicTraceStatus(const sole::uuid& uuid, const StatusOr& status); + void UpdateFileSourceStatus(const sole::uuid& uuid, const StatusOr& status); + private: // Adds a source to Stirling, and updates all state accordingly. - Status AddSource(std::unique_ptr source); + Status AddSource(std::unique_ptr source, + std::optional mutation_id = {}); // Removes a source and all its info classes from stirling. 
Status RemoveSource(std::string_view source_name); @@ -239,6 +246,11 @@ class StirlingImpl final : public Stirling { // Destroys a dynamic tracing source created by DeployDynamicTraceConnector. void DestroyDynamicTraceConnector(sole::uuid trace_id); + // Creates and deploys file source connector + void DeployFileSourceConnector(sole::uuid trace_id, std::string file_name); + + void DestroyFileSourceConnector(sole::uuid id); + // Main run implementation. void RunCore(); @@ -277,6 +289,10 @@ class StirlingImpl final : public Stirling { absl::flat_hash_map> dynamic_trace_status_map_ ABSL_GUARDED_BY(dynamic_trace_status_map_lock_); + absl::base_internal::SpinLock file_source_status_map_lock_; + absl::flat_hash_map> file_source_status_map_ + ABSL_GUARDED_BY(file_source_status_map_lock_); + StirlingMonitor& monitor_ = *StirlingMonitor::GetInstance(); struct DynamicTraceInfo { @@ -288,6 +304,15 @@ class StirlingImpl final : public Stirling { absl::flat_hash_map trace_id_info_map_ ABSL_GUARDED_BY(dynamic_trace_status_map_lock_); + struct FileSourceInfo { + std::string source_connector; + std::string file_name; + std::string output_table; + }; + + absl::flat_hash_map file_source_info_map_ + ABSL_GUARDED_BY(file_source_status_map_lock_); + // RunCoreStats tracks how much work is accomplished in each run core iteration, // and it also keeps a histogram of sleep durations. 
RunCoreStats run_core_stats_; @@ -427,7 +452,8 @@ std::unique_ptr StirlingImpl::GetContext() { return std::unique_ptr(new SystemWideStandaloneContext()); } -Status StirlingImpl::AddSource(std::unique_ptr source) { +Status StirlingImpl::AddSource(std::unique_ptr source, + std::optional mutation_id) { PX_RETURN_IF_ERROR(source->Init()); absl::base_internal::SpinLockHolder lock(&info_class_mgrs_lock_); @@ -438,6 +464,9 @@ Status StirlingImpl::AddSource(std::unique_ptr source) { LOG(INFO) << absl::Substitute("Adding info class: [$0/$1]", source->name(), schema.name()); auto mgr = std::make_unique(schema); mgr->SetSourceConnector(source.get()); + if (mutation_id.has_value()) { + mgr->SetMutationId(mutation_id.value()); + } data_tables.push_back(mgr->data_table()); info_class_mgrs_.push_back(std::move(mgr)); } @@ -499,6 +528,13 @@ Status StirlingImpl::RemoveSource(std::string_view source_name) { namespace { constexpr char kDynTraceSourcePrefix[] = "DT_"; +constexpr char kFileSourcePrefix[] = "LOG_"; + +StatusOr> CreateFileSourceConnector(sole::uuid id, + std::string file_name) { + auto name = absl::StrCat(kFileSourcePrefix, id.str()); + return FileSourceConnector::Create(name, file_name); +} StatusOr> CreateDynamicSourceConnector( sole::uuid trace_id, @@ -535,6 +571,82 @@ StatusOr> CreateDynamicSourceConnector( } // namespace +void StirlingImpl::UpdateFileSourceStatus(const sole::uuid& id, + const StatusOr& s) { + absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); + file_source_status_map_[id] = s; + + // Find program name and log dynamic trace status update to Stirling Monitor. + auto it = file_source_info_map_.find(id); + if (it != file_source_info_map_.end()) { + FileSourceInfo& file_source_info = it->second; + + // Build info JSON with trace_id and output_table. 
+ ::px::utils::JSONObjectBuilder builder; + builder.WriteKV("trace_id", id.str()); + if (s.ok()) { + builder.WriteKV("output_table", file_source_info.output_table); + } + + monitor_.AppendSourceStatusRecord(file_source_info.source_connector, s.status(), + builder.GetString()); + + // Clean up map if status is not ok. When status is RESOURCE_UNAVAILABLE, either deployment + // or removal is pending, so don't clean up. + if (!s.ok() && s.code() != statuspb::Code::RESOURCE_UNAVAILABLE) { + file_source_info_map_.erase(id); + } + } +} + +void StirlingImpl::DeployFileSourceConnector(sole::uuid id, std::string file_name) { + auto timer = ElapsedTimer(); + timer.Start(); + + // Try creating the DynamicTraceConnector--which compiles BCC code. + // On failure, set status and exit. + auto source_or_s = CreateFileSourceConnector(id, file_name); + if (!source_or_s.ok()) { + Status ret_status(px::statuspb::Code::INTERNAL, source_or_s.msg()); + UpdateFileSourceStatus(id, ret_status); + LOG(INFO) << ret_status.ToString(); + return; + } + auto source = source_or_s.ConsumeValueOrDie(); + + LOG(INFO) << absl::Substitute("FileSourceConnector [$0] created in $1 ms.", source->name(), + timer.ElapsedTime_us() / 1000.0); + + // Cache table schema name as source will be moved below. 
+ std::string output_name(source->table_schemas()[0].name()); + + { + absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); + auto it = file_source_info_map_.find(id); + if (it != file_source_info_map_.end()) { + file_source_info_map_[id].output_table = output_name; + } + } + + timer.Start(); + auto s = AddSource(std::move(source), id.str()); + if (!s.ok()) { + UpdateFileSourceStatus(id, s); + LOG(INFO) << s.ToString(); + return; + } + LOG(INFO) << absl::Substitute("FileSourceConnector [$0] created in $1 ms.", id.str(), + timer.ElapsedTime_us() / 1000.0); + + stirlingpb::Publish publication; + { + absl::base_internal::SpinLockHolder lock(&info_class_mgrs_lock_); + PopulatePublishProto(&publication, info_class_mgrs_, output_name); + } + + UpdateFileSourceStatus(id, publication); +} + void StirlingImpl::DeployDynamicTraceConnector( sole::uuid trace_id, std::unique_ptr program) { @@ -563,7 +675,7 @@ void StirlingImpl::DeployDynamicTraceConnector( timer.Start(); // Next, try adding the source (this actually tries to deploy BPF code). // On failure, set status and exit, but do this outside the lock for efficiency reasons. - RETURN_IF_ERROR(AddSource(std::move(source))); + RETURN_IF_ERROR(AddSource(std::move(source), trace_id.str())); LOG(INFO) << absl::Substitute("DynamicTrace [$0]: Deployed BPF program in $1 ms.", trace_id.str(), timer.ElapsedTime_us() / 1000.0); @@ -594,6 +706,29 @@ void StirlingImpl::DestroyDynamicTraceConnector(sole::uuid trace_id) { } } +void StirlingImpl::DestroyFileSourceConnector(sole::uuid trace_id) { + auto timer = ElapsedTimer(); + timer.Start(); + + // Remove from stirling. + auto s = RemoveSource(kFileSourcePrefix + trace_id.str()); + if (!s.ok()) { + UpdateFileSourceStatus(trace_id, s); + LOG(INFO) << s.ToString(); + return; + } + + LOG(INFO) << absl::Substitute("FileSource [$0]: Removed file polling $1 ms.", trace_id.str(), + timer.ElapsedTime_us() / 1000.0); + + // Remove from map. 
+ { + absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); + file_source_status_map_.erase(trace_id); + file_source_info_map_.erase(trace_id); + } +} + #undef RETURN_ERROR #undef RETURN_IF_ERROR #undef ASSIGN_OR_RETURN @@ -652,6 +787,29 @@ void StirlingImpl::RegisterTracepoint( t.detach(); } +void StirlingImpl::RegisterFileSource(sole::uuid id, std::string file_name) { + // Temporary: Check if the target exists on this PEM, otherwise return NotFound. + // TODO(oazizi): Need to think of a better way of doing this. + // Need to differentiate errors caused by the binary not being on the host vs + // other errors. Also should consider races with binary creation/deletion. + { + absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); + std::string source_connector = "file_source"; + file_source_info_map_[id] = {.source_connector = std::move(source_connector), + .file_name = file_name, + .output_table = ""}; + } + + // Initialize the status of this trace to pending. 
+ { + absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); + file_source_status_map_[id] = error::ResourceUnavailable("Waiting for file polling to start."); + } + + auto t = std::thread(&StirlingImpl::DeployFileSourceConnector, this, id, file_name); + t.detach(); +} + StatusOr StirlingImpl::GetTracepointInfo(sole::uuid trace_id) { absl::base_internal::SpinLockHolder lock(&dynamic_trace_status_map_lock_); @@ -664,6 +822,18 @@ StatusOr StirlingImpl::GetTracepointInfo(sole::uuid trace_i return s; } +StatusOr StirlingImpl::GetFileSourceInfo(sole::uuid trace_id) { + absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); + + auto iter = file_source_status_map_.find(trace_id); + if (iter == file_source_status_map_.end()) { + return error::NotFound("FileSource $0 not found.", trace_id.str()); + } + + StatusOr s = iter->second; + return s; +} + Status StirlingImpl::RemoveTracepoint(sole::uuid trace_id) { // Change the status of this trace to pending while we delete it. UpdateDynamicTraceStatus(trace_id, error::ResourceUnavailable("Probe removal in progress.")); @@ -674,6 +844,16 @@ Status StirlingImpl::RemoveTracepoint(sole::uuid trace_id) { return Status::OK(); } +Status StirlingImpl::RemoveFileSource(sole::uuid trace_id) { + // Change the status of this trace to pending while we delete it. 
+ UpdateFileSourceStatus(trace_id, error::ResourceUnavailable("file source removal in progress.")); + + auto t = std::thread(&StirlingImpl::DestroyFileSourceConnector, this, trace_id); + t.detach(); + + return Status::OK(); +} + void StirlingImpl::GetPublishProto(stirlingpb::Publish* publish_pb) { absl::base_internal::SpinLockHolder lock(&info_class_mgrs_lock_); PopulatePublishProto(publish_pb, info_class_mgrs_); diff --git a/src/stirling/stirling.h b/src/stirling/stirling.h index 16a1d65c6e0..86231e05193 100644 --- a/src/stirling/stirling.h +++ b/src/stirling/stirling.h @@ -122,6 +122,10 @@ class Stirling : public NotCopyable { * Returns the status of the probe registration for the trace identified by the input ID. */ virtual StatusOr GetTracepointInfo(sole::uuid trace_id) = 0; + virtual StatusOr GetFileSourceInfo(sole::uuid trace_id) = 0; + + virtual void RegisterFileSource(sole::uuid id, std::string file_name) = 0; + virtual Status RemoveFileSource(sole::uuid id) = 0; /** * Remove a dynamically created tracepoint. 
diff --git a/src/stirling/testing/common.h b/src/stirling/testing/common.h index c754380eb34..ef8fda4a796 100644 --- a/src/stirling/testing/common.h +++ b/src/stirling/testing/common.h @@ -176,7 +176,7 @@ inline types::ColumnWrapperRecordBatch ExtractRecordsMatchingPID(DataTable* data class Timeout { public: - explicit Timeout(std::chrono::nanoseconds timeout = std::chrono::minutes{5}) + explicit Timeout(std::chrono::nanoseconds timeout = std::chrono::minutes{1}) : timeout_(timeout), start_(std::chrono::steady_clock::now()) {} bool TimedOut() { return !((std::chrono::steady_clock::now() - start_) < timeout_); } diff --git a/src/stirling/testing/overloads.h b/src/stirling/testing/overloads.h index f29062e857f..8a4a8b008f1 100644 --- a/src/stirling/testing/overloads.h +++ b/src/stirling/testing/overloads.h @@ -53,6 +53,16 @@ inline void PrintTo(const ProbeStatusRecord& r, std::ostream* os) { r.info); } +inline bool operator==(const StreamStatusRecord& a, const StreamStatusRecord& b) { + return (a.stream_id == b.stream_id) && (a.bytes_sent == b.bytes_sent) && (a.info == b.info); +} + +inline void PrintTo(const StreamStatusRecord& r, std::ostream* os) { + *os << absl::Substitute( + "StreamStatusRecord{timestamp_ns: $0, stream_id: $1, bytes_sent: $2, info: $3}", + r.timestamp_ns, r.stream_id, r.bytes_sent, r.info); +} + inline bool operator==(const TcpStatsRecord& a, const TcpStatsRecord& b) { return (a.remote_port == b.remote_port) && (a.remote_addr == b.remote_addr) && (a.tx == b.tx) && (a.rx == b.rx) && (a.retransmits == b.retransmits); diff --git a/src/stirling/testing/stirling_mock.h b/src/stirling/testing/stirling_mock.h index 9a997af8a90..fad1c29e550 100644 --- a/src/stirling/testing/stirling_mock.h +++ b/src/stirling/testing/stirling_mock.h @@ -18,6 +18,8 @@ #pragma once +#include + #include #include #include @@ -40,6 +42,9 @@ class MockStirling : public Stirling { (override)); MOCK_METHOD(StatusOr, GetTracepointInfo, (sole::uuid trace_id), (override)); 
MOCK_METHOD(Status, RemoveTracepoint, (sole::uuid trace_id), (override)); + MOCK_METHOD(void, RegisterFileSource, (sole::uuid trace_id, std::string file_name), (override)); + MOCK_METHOD(StatusOr, GetFileSourceInfo, (sole::uuid trace_id), (override)); + MOCK_METHOD(Status, RemoveFileSource, (sole::uuid trace_id), (override)); MOCK_METHOD(void, GetPublishProto, (stirlingpb::Publish * publish_pb), (override)); MOCK_METHOD(void, RegisterDataPushCallback, (DataPushCallback f), (override)); MOCK_METHOD(void, RegisterAgentMetadataCallback, (AgentMetadataCallback f), (override)); diff --git a/src/stirling/utils/monitor.cc b/src/stirling/utils/monitor.cc index 673e92da35d..2341f3ee018 100644 --- a/src/stirling/utils/monitor.cc +++ b/src/stirling/utils/monitor.cc @@ -74,6 +74,12 @@ void StirlingMonitor::AppendProbeStatusRecord(const std::string& source_connecto {CurrentTimeNS(), source_connector, tracepoint, status.code(), status.msg(), info}); } +void StirlingMonitor::AppendStreamStatusRecord(const std::string& stream_id, + const int64_t bytes_sent, const std::string& info) { + absl::base_internal::SpinLockHolder lock(&stream_status_lock_); + stream_status_records_.push_back({CurrentTimeNS(), stream_id, bytes_sent, info}); +} + std::vector StirlingMonitor::ConsumeSourceStatusRecords() { absl::base_internal::SpinLockHolder lock(&source_status_lock_); return std::move(source_status_records_); @@ -84,5 +90,10 @@ std::vector StirlingMonitor::ConsumeProbeStatusRecords() { return std::move(probe_status_records_); } +std::vector StirlingMonitor::ConsumeStreamStatusRecords() { + absl::base_internal::SpinLockHolder lock(&stream_status_lock_); + return std::move(stream_status_records_); +} + } // namespace stirling } // namespace px diff --git a/src/stirling/utils/monitor.h b/src/stirling/utils/monitor.h index 214a2f49e39..596dfc7fed9 100644 --- a/src/stirling/utils/monitor.h +++ b/src/stirling/utils/monitor.h @@ -50,6 +50,14 @@ struct ProbeStatusRecord { std::string info = ""; }; 
+// Status of stream processing +struct StreamStatusRecord { + int64_t timestamp_ns = 0; + std::string stream_id = ""; + int64_t bytes_sent = 0; + std::string info = ""; +}; + class StirlingMonitor : NotCopyMoveable { public: static StirlingMonitor* GetInstance() { @@ -65,10 +73,13 @@ class StirlingMonitor : NotCopyMoveable { // Stirling Error Reporting. void AppendProbeStatusRecord(const std::string& source_connector, const std::string& tracepoint, const Status& status, const std::string& info); + void AppendStreamStatusRecord(const std::string& stream_id, const int64_t bytes_sent, + const std::string& info); void AppendSourceStatusRecord(const std::string& source_connector, const Status& status, const std::string& context); std::vector ConsumeProbeStatusRecords(); std::vector ConsumeSourceStatusRecords(); + std::vector ConsumeStreamStatusRecords(); static constexpr auto kCrashWindow = std::chrono::seconds{5}; @@ -81,10 +92,13 @@ class StirlingMonitor : NotCopyMoveable { std::vector probe_status_records_ ABSL_GUARDED_BY(probe_status_lock_); // Records of Stirling Source Connector status. std::vector source_status_records_ ABSL_GUARDED_BY(source_status_lock_); + // Records of Stirling stream connector status. + std::vector stream_status_records_ ABSL_GUARDED_BY(stream_status_lock_); // Lock to protect probe and source records. 
absl::base_internal::SpinLock probe_status_lock_; absl::base_internal::SpinLock source_status_lock_; + absl::base_internal::SpinLock stream_status_lock_; prometheus::Counter& java_proc_crashed_during_attach_; }; diff --git a/src/table_store/schema/relation.cc b/src/table_store/schema/relation.cc index da087835e1b..d2ca4a35605 100644 --- a/src/table_store/schema/relation.cc +++ b/src/table_store/schema/relation.cc @@ -38,6 +38,11 @@ Relation::Relation() = default; Relation::Relation(ColTypeArray col_types, ColNameArray col_names) : Relation(col_types, col_names, ColDescArray(col_types.size(), "")) {} +Relation::Relation(ColTypeArray col_types, ColNameArray col_names, std::optional mutation_id) + : Relation(col_types, col_names, ColDescArray(col_types.size(), "")) { + mutation_id_ = mutation_id; + } + Relation::Relation(ColTypeArray col_types, ColNameArray col_names, ColDescArray col_desc) : Relation(col_types, col_names, col_desc, ColSemanticTypeArray(col_types.size(), types::ST_NONE)) {} @@ -161,6 +166,9 @@ std::string Relation::DebugString() const { for (size_t i = 0; i < col_types_.size(); ++i) { col_info_as_str.push_back(absl::StrCat(col_names_[i], ":", types::ToString(col_types_[i]))); } + if (mutation_id_.has_value()) { + col_info_as_str.push_back(absl::Substitute("mutation_id:$0", mutation_id_.value())); + } return "[" + absl::StrJoin(col_info_as_str, ", ") + "]"; } @@ -173,6 +181,9 @@ Status Relation::ToProto(table_store::schemapb::Relation* relation_proto) const col_pb->set_column_name(GetColumnName(col_idx)); col_pb->set_column_semantic_type(GetColumnSemanticType(col_idx)); } + if (mutation_id_.has_value()) { + relation_proto->set_mutation_id(mutation_id_.value()); + } return Status::OK(); } Status Relation::FromProto(const table_store::schemapb::Relation* relation_pb) { @@ -184,6 +195,9 @@ Status Relation::FromProto(const table_store::schemapb::Relation* relation_pb) { auto column = relation_pb->columns(idx); AddColumn(column.column_type(), 
column.column_name(), column.column_semantic_type()); } + if (relation_pb->mutation_id().size() > 0) { + mutation_id_ = relation_pb->mutation_id(); + } return Status::OK(); } diff --git a/src/table_store/schema/relation.h b/src/table_store/schema/relation.h index f0105b65c00..5f45cdfe9d4 100644 --- a/src/table_store/schema/relation.h +++ b/src/table_store/schema/relation.h @@ -43,6 +43,7 @@ class Relation { Relation(); // Constructor for Relation that initializes with a list of column types. explicit Relation(ColTypeArray col_types, ColNameArray col_names); + explicit Relation(ColTypeArray col_types, ColNameArray col_names, std::optional mutation_id); explicit Relation(ColTypeArray col_types, ColNameArray col_names, ColDescArray col_desc); explicit Relation(ColTypeArray col_types, ColNameArray col_names, ColSemanticTypeArray col_semantic_types); @@ -118,12 +119,15 @@ class Relation { return out << relation.DebugString(); } + std::optional mutation_id() const { return mutation_id_; } + private: ColTypeArray col_types_; ColNameArray col_names_; ColDescArray col_desc_; ColSemanticTypeArray col_semantic_types_; ColPatternTypeArray col_pattern_types_; + std::optional mutation_id_; }; } // namespace schema diff --git a/src/table_store/schemapb/schema.pb.go b/src/table_store/schemapb/schema.pb.go index 93aada07afb..f7a66f48acf 100755 --- a/src/table_store/schemapb/schema.pb.go +++ b/src/table_store/schemapb/schema.pb.go @@ -486,8 +486,9 @@ func (m *RowBatchData) GetEos() bool { } type Relation struct { - Columns []*Relation_ColumnInfo `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` - Desc string `protobuf:"bytes,2,opt,name=desc,proto3" json:"desc,omitempty"` + Columns []*Relation_ColumnInfo `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` + Desc string `protobuf:"bytes,2,opt,name=desc,proto3" json:"desc,omitempty"` + MutationId string `protobuf:"bytes,3,opt,name=mutation_id,json=mutationId,proto3" json:"mutation_id,omitempty"` } 
func (m *Relation) Reset() { *m = Relation{} } @@ -536,6 +537,13 @@ func (m *Relation) GetDesc() string { return "" } +func (m *Relation) GetMutationId() string { + if m != nil { + return m.MutationId + } + return "" +} + type Relation_ColumnInfo struct { ColumnName string `protobuf:"bytes,1,opt,name=column_name,json=columnName,proto3" json:"column_name,omitempty"` ColumnType typespb.DataType `protobuf:"varint,2,opt,name=column_type,json=columnType,proto3,enum=px.types.DataType" json:"column_type,omitempty"` @@ -734,58 +742,59 @@ func init() { } var fileDescriptor_837edaf494876c32 = []byte{ - // 810 bytes of a gzipped FileDescriptorProto + // 825 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0x77, 0xe3, 0xd8, 0x75, 0xde, 0x6e, 0xaa, 0x32, 0x2a, 0x50, 0x7c, 0xd8, 0x50, 0x37, - 0x81, 0x1e, 0x90, 0x0d, 0x4e, 0x65, 0xa2, 0x4a, 0x70, 0x30, 0x6d, 0xe4, 0xa8, 0xc2, 0x42, 0xe3, - 0x70, 0xe1, 0x80, 0x35, 0xde, 0x4c, 0x93, 0x15, 0xbb, 0x3b, 0xab, 0x9d, 0x71, 0x1d, 0xdf, 0x10, - 0x17, 0xae, 0x7c, 0x06, 0x4e, 0x7c, 0x0c, 0x8e, 0x1c, 0x73, 0xac, 0x10, 0x42, 0xc4, 0xb9, 0x70, - 0xcc, 0x47, 0x40, 0xf3, 0x66, 0x36, 0x5e, 0xa3, 0x2e, 0xe1, 0x92, 0x3c, 0x8f, 0xff, 0xef, 0x37, - 0x7f, 0xbd, 0xff, 0x5b, 0x2f, 0xec, 0xc9, 0x3c, 0xec, 0x2a, 0x36, 0x8d, 0xf9, 0x44, 0x2a, 0x91, - 0xf3, 0xae, 0x0c, 0xcf, 0x78, 0xc2, 0xb2, 0xa9, 0x2d, 0x3a, 0x59, 0x2e, 0x94, 0x20, 0xef, 0x66, - 0xe7, 0x9d, 0x92, 0xaa, 0x53, 0xa8, 0x5a, 0xf7, 0x4f, 0xc5, 0xa9, 0x40, 0x4d, 0x57, 0x57, 0x46, - 0xde, 0xda, 0xd5, 0x54, 0x79, 0xc6, 0x72, 0x7e, 0xd2, 0x55, 0x8b, 0x8c, 0x4b, 0xf3, 0x37, 0x9b, - 0x9a, 0xff, 0x46, 0xd5, 0x7e, 0x04, 0xdb, 0x03, 0x21, 0x62, 0xce, 0xd2, 0x2f, 0x44, 0x3c, 0x4b, - 0x52, 0x42, 0x60, 0xf3, 0x84, 0x29, 0xf6, 0xc0, 0x7d, 0xbf, 0xf6, 0xb8, 0x49, 0xb1, 0x6e, 0x3f, - 0x04, 0xef, 0x28, 0x55, 0xfd, 0x27, 0x6f, 0x90, 0xd4, 0xac, 0xa4, 0x0f, 0xdb, 0x5f, 0x1f, 0xa5, - 0xea, 0x93, 0xde, 0x81, 0x15, 
0xed, 0x95, 0x44, 0x5e, 0xef, 0xad, 0x8e, 0x36, 0x8f, 0xf7, 0x5a, - 0x99, 0xed, 0x7b, 0x04, 0xdb, 0x87, 0xb1, 0x60, 0x6f, 0x86, 0xbb, 0x56, 0xb4, 0x0b, 0x77, 0x8f, - 0xa3, 0x84, 0xf7, 0x9f, 0x8c, 0xc6, 0xff, 0x61, 0xe1, 0x5b, 0xf0, 0xc7, 0x2a, 0x8f, 0xd2, 0x53, - 0xab, 0x19, 0x95, 0x34, 0xfe, 0xe0, 0xe9, 0xef, 0x7f, 0xee, 0xf4, 0xb3, 0xf3, 0xce, 0x09, 0x7f, - 0xd5, 0xcd, 0xa2, 0xf3, 0x88, 0x77, 0x2b, 0xa7, 0x6e, 0x7c, 0x1a, 0xd6, 0x33, 0xa6, 0x98, 0xe5, - 0xff, 0x51, 0x83, 0x86, 0x45, 0xbf, 0x00, 0x7f, 0x6a, 0xa6, 0x36, 0xb1, 0x57, 0xb8, 0x8f, 0xbd, - 0xde, 0x07, 0x9d, 0x8a, 0x84, 0x3a, 0x6b, 0x23, 0x1e, 0x3a, 0xd4, 0xb3, 0xdd, 0x9a, 0x4e, 0x9e, - 0x03, 0x44, 0x7a, 0xba, 0x06, 0xb5, 0x81, 0xa8, 0xdd, 0x4a, 0x54, 0x29, 0x88, 0xa1, 0x43, 0xb7, - 0xb0, 0x13, 0x31, 0x2f, 0xc0, 0x9f, 0x45, 0x38, 0x5a, 0x03, 0xaa, 0xdd, 0xe2, 0x69, 0x2d, 0x2e, - 0xed, 0xc9, 0x76, 0x23, 0x6c, 0x04, 0xdb, 0x0a, 0x27, 0x9e, 0x4a, 0x43, 0xdb, 0x44, 0xda, 0x87, - 0x95, 0xb4, 0xf5, 0x7c, 0x86, 0x0e, 0xf5, 0x8b, 0xfe, 0xc2, 0xdc, 0x4b, 0x13, 0xb3, 0xc1, 0xd5, - 0x6f, 0x31, 0xb7, 0xb6, 0x13, 0xda, 0x9c, 0xed, 0x46, 0xd8, 0x10, 0x3c, 0x89, 0xe1, 0x18, 0x56, - 0x03, 0x59, 0x7b, 0x95, 0xac, 0xf2, 0x52, 0x0c, 0x1d, 0x0a, 0xf2, 0x26, 0xd8, 0x01, 0x40, 0x33, - 0x14, 0x31, 0x62, 0xda, 0x3f, 0xb8, 0xe0, 0x53, 0x31, 0x1f, 0x30, 0x15, 0x9e, 0xe1, 0x35, 0xfb, - 0xb0, 0x19, 0x8a, 0x58, 0xda, 0x0d, 0xde, 0xa9, 0xe4, 0x1b, 0x32, 0x45, 0x31, 0x79, 0x0f, 0x9a, - 0xe9, 0x2c, 0x99, 0xe4, 0x62, 0x2e, 0x31, 0xca, 0x1a, 0xbd, 0x93, 0xce, 0x12, 0x2a, 0xe6, 0x92, - 0xdc, 0x83, 0x1a, 0x17, 0x73, 0xcc, 0xa5, 0x49, 0x75, 0x69, 0x4e, 0x24, 0xce, 0x16, 0x4f, 0x64, - 0xfb, 0x7a, 0x03, 0x9a, 0x94, 0xc7, 0x4c, 0x45, 0x22, 0x25, 0x87, 0x70, 0x27, 0x44, 0x76, 0xe1, - 0xe1, 0xa3, 0x4a, 0x0f, 0x45, 0x8f, 0x35, 0x73, 0x94, 0xbe, 0x14, 0xb4, 0x68, 0xc6, 0x87, 0x85, - 0xcb, 0x10, 0xfd, 0x6c, 0x51, 0xac, 0x5b, 0x3f, 0x6e, 0x00, 0xac, 0xb4, 0x64, 0x07, 0x3c, 0xa3, - 0x9e, 0xa4, 0x2c, 0xe1, 0xb8, 0xcf, 0x5b, 0x14, 0xcc, 0xd1, 0x88, 
0x25, 0x9c, 0xec, 0xdf, 0x08, - 0xf4, 0xd3, 0x81, 0xa8, 0xbb, 0x3d, 0xb2, 0x7a, 0xaa, 0xf5, 0xc4, 0x8e, 0x17, 0x19, 0x2f, 0x9a, - 0x74, 0x5d, 0xa2, 0xe2, 0xfd, 0xb5, 0x32, 0xf5, 0x19, 0x97, 0x21, 0x19, 0xc2, 0x7d, 0x2b, 0x90, - 0x3c, 0x61, 0xa9, 0x8a, 0x42, 0x83, 0xdf, 0x44, 0xfc, 0x3b, 0x2b, 0xfc, 0xd8, 0x7e, 0x8d, 0x57, - 0x10, 0xd3, 0x53, 0x3e, 0x23, 0x07, 0xe0, 0x67, 0x4c, 0x29, 0x9e, 0x5b, 0x83, 0x75, 0x24, 0xbc, - 0xbd, 0x22, 0x7c, 0x65, 0xbe, 0x45, 0x80, 0x97, 0xad, 0x3e, 0xb4, 0x7f, 0x76, 0xa1, 0x7e, 0xac, - 0x67, 0x4a, 0x3e, 0x83, 0x66, 0x6e, 0xe7, 0x68, 0xf7, 0xfd, 0xe1, 0xad, 0x03, 0xa7, 0x37, 0x2d, - 0xe4, 0x10, 0xbc, 0x5c, 0xcc, 0x27, 0x53, 0xbd, 0x40, 0x5c, 0x3e, 0xa8, 0x63, 0x64, 0xd5, 0x6b, - 0x59, 0xde, 0x35, 0x0a, 0xb9, 0xfd, 0xc4, 0x31, 0x2e, 0x0c, 0xa1, 0x61, 0xe2, 0xd2, 0x75, 0xfb, - 0x57, 0x17, 0x1a, 0x63, 0xec, 0x24, 0x63, 0xf0, 0x8b, 0x2b, 0x27, 0x09, 0xcb, 0xec, 0x6a, 0x7c, - 0x5c, 0xbd, 0xfe, 0xe6, 0x1d, 0x52, 0x18, 0xfe, 0x92, 0x65, 0xcf, 0x53, 0x95, 0x2f, 0xa8, 0x97, - 0xaf, 0x4e, 0x5a, 0x0c, 0xee, 0xfd, 0x5b, 0xa0, 0xb7, 0xf3, 0x3b, 0xbe, 0xb0, 0xbb, 0xa0, 0x4b, - 0xf2, 0x29, 0xd4, 0x5f, 0xb1, 0x78, 0xc6, 0xed, 0x8f, 0xd4, 0xff, 0x98, 0x8e, 0xd1, 0x3f, 0xdd, - 0x38, 0x70, 0x07, 0x9f, 0x5f, 0x5c, 0x06, 0xce, 0xeb, 0xcb, 0xc0, 0xb9, 0xbe, 0x0c, 0xdc, 0xef, - 0x97, 0x81, 0xfb, 0xcb, 0x32, 0x70, 0x7f, 0x5b, 0x06, 0xee, 0xc5, 0x32, 0x70, 0xff, 0x5a, 0x06, - 0xee, 0xdf, 0xcb, 0xc0, 0xb9, 0x5e, 0x06, 0xee, 0x4f, 0x57, 0x81, 0x73, 0x71, 0x15, 0x38, 0xaf, - 0xaf, 0x02, 0xe7, 0x9b, 0x66, 0xc1, 0x9c, 0x36, 0xf0, 0x85, 0xb5, 0xff, 0x4f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xc4, 0x65, 0x33, 0xdc, 0x2e, 0x07, 0x00, 0x00, + 0x14, 0xc7, 0x77, 0xb3, 0xb1, 0xeb, 0xbc, 0xdd, 0x54, 0x65, 0x54, 0xa0, 0xe4, 0xb0, 0xa1, 0x6e, + 0x02, 0x3d, 0x20, 0x1b, 0x92, 0x2a, 0x44, 0x95, 0xe0, 0x60, 0xda, 0xc8, 0x51, 0x85, 0x85, 0xc6, + 0xe1, 0xc2, 0x01, 0x6b, 0xbc, 0x9e, 0x26, 0x2b, 0x76, 0x77, 0x56, 0x3b, 0xe3, 0x3a, 0xbe, 0x21, + 0x2e, 0x5c, 0xe1, 0x2b, 0x70, 0xe2, 0x63, 
0x70, 0xe4, 0x98, 0x63, 0x85, 0x10, 0x22, 0xce, 0x85, + 0x63, 0x3f, 0x02, 0x9a, 0x37, 0xb3, 0xf1, 0x1a, 0x75, 0x49, 0x2f, 0xc9, 0xec, 0xec, 0xff, 0xfd, + 0xe6, 0xaf, 0xf7, 0x7f, 0xe3, 0x85, 0x5d, 0x59, 0x44, 0x5d, 0xc5, 0xc6, 0x09, 0x1f, 0x49, 0x25, + 0x0a, 0xde, 0x95, 0xd1, 0x19, 0x4f, 0x59, 0x3e, 0xb6, 0x8b, 0x4e, 0x5e, 0x08, 0x25, 0xc8, 0xbb, + 0xf9, 0x79, 0xa7, 0xa2, 0xea, 0x94, 0xaa, 0xad, 0xbb, 0xa7, 0xe2, 0x54, 0xa0, 0xa6, 0xab, 0x57, + 0x46, 0xbe, 0xb5, 0xa3, 0xa9, 0xf2, 0x8c, 0x15, 0x7c, 0xd2, 0x55, 0xf3, 0x9c, 0x4b, 0xf3, 0x37, + 0x1f, 0x9b, 0xff, 0x46, 0xd5, 0x7e, 0x00, 0x9b, 0x3d, 0x21, 0x12, 0xce, 0xb2, 0x2f, 0x44, 0x32, + 0x4d, 0x33, 0x42, 0x60, 0x7d, 0xc2, 0x14, 0xbb, 0xe7, 0xbe, 0xef, 0x3d, 0x6c, 0x51, 0x5c, 0xb7, + 0xef, 0x83, 0x7f, 0x9c, 0xa9, 0x83, 0x47, 0xaf, 0x91, 0x78, 0x56, 0x72, 0x00, 0x9b, 0x5f, 0x1f, + 0x67, 0xea, 0x93, 0xbd, 0x43, 0x2b, 0xda, 0xad, 0x88, 0xfc, 0xbd, 0xb7, 0x3a, 0xda, 0x3c, 0x9e, + 0x6b, 0x65, 0xb6, 0xee, 0x01, 0x6c, 0x1e, 0x25, 0x82, 0xbd, 0x1e, 0xee, 0x5a, 0xd1, 0x0e, 0xdc, + 0x3e, 0x89, 0x53, 0x7e, 0xf0, 0x68, 0x30, 0xfc, 0x1f, 0x0b, 0xdf, 0x42, 0x30, 0x54, 0x45, 0x9c, + 0x9d, 0x5a, 0xcd, 0xa0, 0xa2, 0x09, 0x7a, 0x8f, 0xff, 0xf8, 0x6b, 0xfb, 0x20, 0x3f, 0xef, 0x4c, + 0xf8, 0x8b, 0x6e, 0x1e, 0x9f, 0xc7, 0xbc, 0x5b, 0xdb, 0x75, 0xe3, 0xd3, 0xb0, 0x9e, 0x30, 0xc5, + 0x2c, 0xff, 0x4f, 0x0f, 0x9a, 0x16, 0xfd, 0x0c, 0x82, 0xb1, 0xe9, 0xda, 0xc8, 0x1e, 0xe1, 0x3e, + 0xf4, 0xf7, 0x3e, 0xe8, 0xd4, 0x24, 0xd4, 0x59, 0x69, 0x71, 0xdf, 0xa1, 0xbe, 0xad, 0xd6, 0x74, + 0xf2, 0x14, 0x20, 0xd6, 0xdd, 0x35, 0xa8, 0x35, 0x44, 0xed, 0xd4, 0xa2, 0x2a, 0x41, 0xf4, 0x1d, + 0xba, 0x81, 0x95, 0x88, 0x79, 0x06, 0xc1, 0x34, 0xc6, 0xd6, 0x1a, 0x90, 0x77, 0x83, 0xa7, 0x95, + 0xb8, 0xb4, 0x27, 0x5b, 0x8d, 0xb0, 0x01, 0x6c, 0x2a, 0xec, 0x78, 0x26, 0x0d, 0x6d, 0x1d, 0x69, + 0x1f, 0xd6, 0xd2, 0x56, 0xf3, 0xe9, 0x3b, 0x34, 0x28, 0xeb, 0x4b, 0x73, 0xcf, 0x4d, 0xcc, 0x06, + 0xd7, 0xb8, 0xc1, 0xdc, 0xca, 0x4c, 0x68, 0x73, 0xb6, 0x1a, 0x61, 0x7d, 0xf0, 
0x25, 0x86, 0x63, + 0x58, 0x4d, 0x64, 0xed, 0xd6, 0xb2, 0xaa, 0x43, 0xd1, 0x77, 0x28, 0xc8, 0xeb, 0x60, 0x7b, 0x00, + 0xad, 0x48, 0x24, 0x88, 0x69, 0xff, 0xe0, 0x42, 0x40, 0xc5, 0xac, 0xc7, 0x54, 0x74, 0x86, 0xc7, + 0xec, 0xc3, 0x7a, 0x24, 0x12, 0x69, 0x27, 0x78, 0xbb, 0x96, 0x6f, 0xc8, 0x14, 0xc5, 0xe4, 0x3d, + 0x68, 0x65, 0xd3, 0x74, 0x54, 0x88, 0x99, 0xc4, 0x28, 0x3d, 0x7a, 0x2b, 0x9b, 0xa6, 0x54, 0xcc, + 0x24, 0xb9, 0x03, 0x1e, 0x17, 0x33, 0xcc, 0xa5, 0x45, 0xf5, 0xd2, 0xec, 0x48, 0xec, 0x2d, 0xee, + 0xc8, 0xf6, 0xcf, 0x1e, 0xb4, 0x28, 0x4f, 0x98, 0x8a, 0x45, 0x46, 0x8e, 0xe0, 0x56, 0x84, 0xec, + 0xd2, 0xc3, 0x47, 0xb5, 0x1e, 0xca, 0x1a, 0x6b, 0xe6, 0x38, 0x7b, 0x2e, 0x68, 0x59, 0x8c, 0x97, + 0x85, 0xcb, 0x08, 0xfd, 0x6c, 0x50, 0x5c, 0x93, 0x6d, 0xf0, 0xd3, 0xa9, 0xc2, 0x9a, 0x51, 0x3c, + 0x41, 0x53, 0x1b, 0x14, 0xca, 0xad, 0xe3, 0xc9, 0xd6, 0x8f, 0x6b, 0x00, 0x4b, 0x98, 0xd6, 0x1b, + 0xdc, 0x28, 0x63, 0x29, 0xc7, 0x81, 0xdf, 0xa0, 0x60, 0xb6, 0x06, 0x2c, 0xe5, 0x64, 0xff, 0x5a, + 0xa0, 0xaf, 0x0f, 0x9e, 0x75, 0x7b, 0x8f, 0x2c, 0xaf, 0xbd, 0x6e, 0xe9, 0xc9, 0x3c, 0xe7, 0x65, + 0x91, 0x5e, 0x57, 0xa8, 0x68, 0xd0, 0xab, 0x52, 0x9f, 0x68, 0x9b, 0x7d, 0xb8, 0x6b, 0x05, 0x92, + 0xa7, 0x2c, 0x53, 0x71, 0x64, 0xf0, 0xeb, 0x88, 0x7f, 0x67, 0x89, 0x1f, 0xda, 0xd7, 0x78, 0x04, + 0x31, 0x35, 0xd5, 0x3d, 0x72, 0x08, 0x41, 0xce, 0x94, 0xe2, 0x85, 0x35, 0xd8, 0x40, 0xc2, 0xdb, + 0x4b, 0xc2, 0x57, 0xe6, 0x2d, 0x02, 0xfc, 0x7c, 0xf9, 0xd0, 0xfe, 0xc5, 0x85, 0xc6, 0x89, 0x6e, + 0x3a, 0xf9, 0x0c, 0x5a, 0x85, 0x6d, 0xb4, 0xbd, 0x10, 0xf7, 0x6f, 0x4c, 0x84, 0x5e, 0x97, 0x90, + 0x23, 0xf0, 0x0b, 0x31, 0x1b, 0x8d, 0xf5, 0x84, 0x71, 0x79, 0xaf, 0x81, 0x99, 0xd6, 0xcf, 0x6d, + 0x75, 0x18, 0x29, 0x14, 0xf6, 0x89, 0x63, 0x9e, 0x18, 0x42, 0xd3, 0xe4, 0xa9, 0xd7, 0xed, 0xdf, + 0x5c, 0x68, 0x0e, 0xb1, 0x92, 0x0c, 0x21, 0x28, 0x8f, 0x1c, 0xa5, 0x2c, 0xb7, 0xb3, 0xf3, 0x71, + 0xfd, 0xfd, 0x30, 0x1f, 0x99, 0xd2, 0xf0, 0x97, 0x2c, 0x7f, 0x9a, 0xa9, 0x62, 0x4e, 0xfd, 0x62, + 0xb9, 0xb3, 0xc5, 
0xe0, 0xce, 0x7f, 0x05, 0x7a, 0x7c, 0xbf, 0xe3, 0x73, 0x3b, 0x0b, 0x7a, 0x49, + 0x3e, 0x85, 0xc6, 0x0b, 0x96, 0x4c, 0xb9, 0xfd, 0x15, 0x7b, 0x83, 0xee, 0x18, 0xfd, 0xe3, 0xb5, + 0x43, 0xb7, 0xf7, 0xf9, 0xc5, 0x65, 0xe8, 0xbc, 0xbc, 0x0c, 0x9d, 0x57, 0x97, 0xa1, 0xfb, 0xfd, + 0x22, 0x74, 0x7f, 0x5d, 0x84, 0xee, 0xef, 0x8b, 0xd0, 0xbd, 0x58, 0x84, 0xee, 0xdf, 0x8b, 0xd0, + 0xfd, 0x67, 0x11, 0x3a, 0xaf, 0x16, 0xa1, 0xfb, 0xd3, 0x55, 0xe8, 0x5c, 0x5c, 0x85, 0xce, 0xcb, + 0xab, 0xd0, 0xf9, 0xa6, 0x55, 0x32, 0xc7, 0x4d, 0xfc, 0xa2, 0xed, 0xff, 0x1b, 0x00, 0x00, 0xff, + 0xff, 0x2c, 0x17, 0xd8, 0xc3, 0x4f, 0x07, 0x00, 0x00, } func (this *BooleanColumn) Equal(that interface{}) bool { @@ -1204,6 +1213,9 @@ func (this *Relation) Equal(that interface{}) bool { if this.Desc != that1.Desc { return false } + if this.MutationId != that1.MutationId { + return false + } return true } func (this *Relation_ColumnInfo) Equal(that interface{}) bool { @@ -1447,12 +1459,13 @@ func (this *Relation) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&schemapb.Relation{") if this.Columns != nil { s = append(s, "Columns: "+fmt.Sprintf("%#v", this.Columns)+",\n") } s = append(s, "Desc: "+fmt.Sprintf("%#v", this.Desc)+",\n") + s = append(s, "MutationId: "+fmt.Sprintf("%#v", this.MutationId)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1983,6 +1996,13 @@ func (m *Relation) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MutationId) > 0 { + i -= len(m.MutationId) + copy(dAtA[i:], m.MutationId) + i = encodeVarintSchema(dAtA, i, uint64(len(m.MutationId))) + i-- + dAtA[i] = 0x1a + } if len(m.Desc) > 0 { i -= len(m.Desc) copy(dAtA[i:], m.Desc) @@ -2385,6 +2405,10 @@ func (m *Relation) Size() (n int) { if l > 0 { n += 1 + l + sovSchema(uint64(l)) } + l = len(m.MutationId) + if l > 0 { + n += 1 + l + sovSchema(uint64(l)) + } return n } @@ -2630,6 +2654,7 @@ func (this *Relation) 
String() string { s := strings.Join([]string{`&Relation{`, `Columns:` + repeatedStringForColumns + `,`, `Desc:` + fmt.Sprintf("%v", this.Desc) + `,`, + `MutationId:` + fmt.Sprintf("%v", this.MutationId) + `,`, `}`, }, "") return s @@ -3836,6 +3861,38 @@ func (m *Relation) Unmarshal(dAtA []byte) error { } m.Desc = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MutationId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSchema + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSchema + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSchema + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MutationId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSchema(dAtA[iNdEx:]) diff --git a/src/table_store/schemapb/schema.proto b/src/table_store/schemapb/schema.proto index 25b11d7f6c4..5762aed73d2 100644 --- a/src/table_store/schemapb/schema.proto +++ b/src/table_store/schemapb/schema.proto @@ -90,6 +90,8 @@ message Relation { repeated ColumnInfo columns = 1; // Description of the table. string desc = 2; + // Mutation id of the table if one exists. + string mutation_id = 3; } // A table serialized as proto. 
diff --git a/src/table_store/table/internal/store_with_row_accounting.h b/src/table_store/table/internal/store_with_row_accounting.h index 842f91b8b81..d26f18816fa 100644 --- a/src/table_store/table/internal/store_with_row_accounting.h +++ b/src/table_store/table/internal/store_with_row_accounting.h @@ -53,6 +53,8 @@ void constexpr_else_static_assert_false() { static_assert(always_false, "constexpr else block reached"); } +class HotOnlyStore; + /** * StoreWithRowTimeAccounting stores a deque of batches (hot or cold) and keeps track of the first * and last unique RowID's for each batch, as well as the first and last times for each batch (if @@ -75,12 +77,28 @@ class StoreWithRowTimeAccounting { StoreWithRowTimeAccounting(const schema::Relation& rel, int64_t time_col_idx) : rel_(rel), time_col_idx_(time_col_idx) {} + Status AddBatchSliceToRowBatch(const TBatch& batch, size_t row_offset, size_t batch_size, + const std::vector& cols, + schema::RowBatch* output_rb) const { + if constexpr (std::is_same_v) { + for (auto col_idx : cols) { + auto arr = batch[col_idx]->Slice(row_offset, batch_size); + PX_RETURN_IF_ERROR(output_rb->AddColumn(arr)); + } + return Status::OK(); + } else if constexpr (std::is_same_v) { + return batch.AddBatchSliceToRowBatch(row_offset, batch_size, cols, output_rb); + } else { + constexpr_else_static_assert_false(); + } + } + /** * GetNextRowBatch returns the next row batch in this store after the given unique row id. * @param last_read_row_id, pointer to the unique RowID of the last read row. The outputted batch * should include only rows with a RowID greater than this RowID. After determining the output * batch, this pointer is updated to point to the RowID of the last row in the outputted batch. - * @param hints, pointer to a BatchHints object (usually from a Table::Cursor), that provides a + * @param hints, pointer to a BatchHints object (usually from a Cursor), that provides a * hint to the store about which batch should be next. 
If the hint is correct, no searching for * the right batch is required, otherwise searching is performed as usual. This is purely an * optimization and passing a `nullptr` for hints is accepted. @@ -158,16 +176,16 @@ class StoreWithRowTimeAccounting { * PopFront removes the first batch in the store, and returns an rvalue reference to it. * @return rvalue reference to the removed batch. */ - TBatch&& PopFront() { + TBatch PopFront() { DCHECK(!batches_.empty()); first_batch_id_++; row_ids_.pop_front(); if (time_col_idx_ != -1) times_.pop_front(); - auto&& front = std::move(batches_.front()); + auto front = std::move(batches_.front()); batches_.pop_front(); - return std::move(front); + return front; } /** @@ -384,28 +402,14 @@ class StoreWithRowTimeAccounting { } } - Status AddBatchSliceToRowBatch(const TBatch& batch, size_t row_offset, size_t batch_size, - const std::vector& cols, - schema::RowBatch* output_rb) const { - if constexpr (std::is_same_v) { - for (auto col_idx : cols) { - auto arr = batch[col_idx]->Slice(row_offset, batch_size); - PX_RETURN_IF_ERROR(output_rb->AddColumn(arr)); - } - return Status::OK(); - } else if constexpr (std::is_same_v) { - return batch.AddBatchSliceToRowBatch(row_offset, batch_size, cols, output_rb); - } else { - constexpr_else_static_assert_false(); - } - } - BatchID first_batch_id_ = 0; const schema::Relation& rel_; const int64_t time_col_idx_; std::deque batches_; std::deque row_ids_; std::deque times_; + + friend HotOnlyStore; }; } // namespace internal diff --git a/src/table_store/table/table.cc b/src/table_store/table/table.cc index a3bac23f4e5..6ec35efd369 100644 --- a/src/table_store/table/table.cc +++ b/src/table_store/table/table.cc @@ -48,13 +48,13 @@ DEFINE_int32(table_store_table_size_limit, namespace px { namespace table_store { -Table::Cursor::Cursor(const Table* table, StartSpec start, StopSpec stop) +Cursor::Cursor(const Table* table, StartSpec start, StopSpec stop) : table_(table), hints_(internal::BatchHints{}) { 
AdvanceToStart(start); StopStateFromSpec(std::move(stop)); } -void Table::Cursor::AdvanceToStart(const StartSpec& start) { +void Cursor::AdvanceToStart(const StartSpec& start) { switch (start.type) { case StartSpec::StartType::StartAtTime: { last_read_row_id_ = table_->FindRowIDFromTimeFirstGreaterThanOrEqual(start.start_time) - 1; @@ -71,7 +71,7 @@ void Table::Cursor::AdvanceToStart(const StartSpec& start) { } } -void Table::Cursor::UpdateStopStateForStopAtTime() { +void Cursor::UpdateStopStateForStopAtTime() { if (stop_.stop_row_id_final) { // Once stop_row_id is set, we know the stop time is already within the table so we don't have // to update it anymore. @@ -85,7 +85,7 @@ void Table::Cursor::UpdateStopStateForStopAtTime() { } } -void Table::Cursor::StopStateFromSpec(StopSpec&& stop) { +void Cursor::StopStateFromSpec(StopSpec&& stop) { stop_.spec = std::move(stop); switch (stop_.spec.type) { case StopSpec::StopType::CurrentEndOfTable: { @@ -110,7 +110,7 @@ void Table::Cursor::StopStateFromSpec(StopSpec&& stop) { } } -bool Table::Cursor::NextBatchReady() { +bool Cursor::NextBatchReady() { switch (stop_.spec.type) { case StopSpec::StopType::StopAtTimeOrEndOfTable: case StopSpec::StopType::CurrentEndOfTable: { @@ -127,7 +127,7 @@ bool Table::Cursor::NextBatchReady() { return false; } -bool Table::Cursor::Done() { +bool Cursor::Done() { auto next_row_id = last_read_row_id_ + 1; switch (stop_.spec.type) { case StopSpec::StopType::StopAtTimeOrEndOfTable: @@ -149,29 +149,28 @@ bool Table::Cursor::Done() { return false; } -void Table::Cursor::UpdateStopSpec(Cursor::StopSpec stop) { StopStateFromSpec(std::move(stop)); } +void Cursor::UpdateStopSpec(Cursor::StopSpec stop) { StopStateFromSpec(std::move(stop)); } -internal::RowID* Table::Cursor::LastReadRowID() { return &last_read_row_id_; } +internal::RowID* Cursor::LastReadRowID() { return &last_read_row_id_; } -internal::BatchHints* Table::Cursor::Hints() { return &hints_; } +internal::BatchHints* Cursor::Hints() { 
return &hints_; } -std::optional Table::Cursor::StopRowID() const { +std::optional Cursor::StopRowID() const { if (stop_.spec.type == StopSpec::StopType::Infinite) { return std::nullopt; } return stop_.stop_row_id; } -StatusOr> Table::Cursor::GetNextRowBatch( +StatusOr> Cursor::GetNextRowBatch( const std::vector& cols) { return table_->GetNextRowBatch(this, cols); } -Table::Table(std::string_view table_name, const schema::Relation& relation, size_t max_table_size, - size_t compacted_batch_size) - : metrics_(&(GetMetricsRegistry()), std::string(table_name)), - rel_(relation), - max_table_size_(max_table_size), +HotColdTable::HotColdTable(std::string_view table_name, const schema::Relation& relation, + size_t max_table_size, size_t compacted_batch_size) + : Table(TableMetrics(&(GetMetricsRegistry()), std::string(table_name)), relation, + max_table_size), compacted_batch_size_(compacted_batch_size), // TODO(james): move mem_pool into constructor. compactor_(rel_, arrow::default_memory_pool()) { @@ -189,7 +188,7 @@ Table::Table(std::string_view table_name, const schema::Relation& relation, size rel_, time_col_idx_); } -Status Table::ToProto(table_store::schemapb::Table* table_proto) const { +Status HotColdTable::ToProto(table_store::schemapb::Table* table_proto) const { CHECK(table_proto != nullptr); std::vector col_selector; for (int64_t i = 0; i < static_cast(rel_.NumColumns()); i++) { @@ -209,7 +208,7 @@ Status Table::ToProto(table_store::schemapb::Table* table_proto) const { return Status::OK(); } -StatusOr> Table::GetNextRowBatch( +StatusOr> HotColdTable::GetNextRowBatch( Cursor* cursor, const std::vector& cols) const { DCHECK(!cursor->Done()) << "Calling GetNextRowBatch on an exhausted Cursor"; absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); @@ -237,91 +236,7 @@ StatusOr> Table::GetNextRowBatch( return rb; } -Status Table::ExpireRowBatches(int64_t row_batch_size) { - if (row_batch_size > max_table_size_) { - return error::InvalidArgument("RowBatch 
size ($0) is bigger than maximum table size ($1).", - row_batch_size, max_table_size_); - } - int64_t bytes; - { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); - } - while (bytes + row_batch_size > max_table_size_) { - PX_RETURN_IF_ERROR(ExpireBatch()); - { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); - } - { - absl::base_internal::SpinLockHolder lock(&stats_lock_); - batches_expired_++; - metrics_.batches_expired_counter.Increment(); - } - } - return Status::OK(); -} - -Status Table::WriteRowBatch(const schema::RowBatch& rb) { - // Don't write empty row batches. - if (rb.num_columns() == 0 || rb.ColumnAt(0)->length() == 0) { - return Status::OK(); - } - - internal::RecordOrRowBatch record_or_row_batch(rb); - - PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); - return Status::OK(); -} - -Status Table::TransferRecordBatch( - std::unique_ptr record_batch) { - // Don't transfer over empty row batches. - if (record_batch->empty() || record_batch->at(0)->Size() == 0) { - return Status::OK(); - } - - auto record_batch_w_cache = internal::RecordBatchWithCache{ - std::move(record_batch), - std::vector(rel_.NumColumns()), - std::vector(rel_.NumColumns(), false), - }; - internal::RecordOrRowBatch record_or_row_batch(std::move(record_batch_w_cache)); - - PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); - return Status::OK(); -} - -Status Table::WriteHot(internal::RecordOrRowBatch&& record_or_row_batch) { - // See BatchSizeAccountantNonMutableState for an explanation of the thread safety and necessity of - // NonMutableState. 
- auto batch_stats = internal::BatchSizeAccountant::CalcBatchStats( - ABSL_TS_UNCHECKED_READ(batch_size_accountant_)->NonMutableState(), record_or_row_batch); - - PX_RETURN_IF_ERROR(ExpireRowBatches(batch_stats.bytes)); - - { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - auto batch_length = record_or_row_batch.Length(); - batch_size_accountant_->NewHotBatch(std::move(batch_stats)); - hot_store_->EmplaceBack(next_row_id_, std::move(record_or_row_batch)); - next_row_id_ += batch_length; - } - - { - absl::base_internal::SpinLockHolder lock(&stats_lock_); - ++batches_added_; - metrics_.batches_added_counter.Increment(); - bytes_added_ += batch_stats.bytes; - metrics_.bytes_added_counter.Increment(batch_stats.bytes); - } - - // Make sure locks are released for this call, since they are reacquired inside. - PX_RETURN_IF_ERROR(UpdateTableMetricGauges()); - return Status::OK(); -} - -Table::RowID Table::FirstRowID() const { +Table::RowID HotColdTable::FirstRowID() const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); if (cold_store_->Size() > 0) { return cold_store_->FirstRowID(); @@ -333,7 +248,7 @@ Table::RowID Table::FirstRowID() const { return -1; } -Table::RowID Table::LastRowID() const { +Table::RowID HotColdTable::LastRowID() const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); if (hot_store_->Size() > 0) { @@ -345,7 +260,7 @@ Table::RowID Table::LastRowID() const { return -1; } -Table::Time Table::MaxTime() const { +Table::Time HotColdTable::MaxTime() const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); if (hot_store_->Size() > 0) { @@ -357,7 +272,7 @@ Table::Time Table::MaxTime() const { return -1; } -Table::RowID Table::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { +Table::RowID HotColdTable::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { 
absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); auto optional_row_id = cold_store_->FindRowIDFromTimeFirstGreaterThanOrEqual(time); if (optional_row_id.has_value()) { @@ -371,7 +286,7 @@ Table::RowID Table::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { return next_row_id_; } -Table::RowID Table::FindRowIDFromTimeFirstGreaterThan(Time time) const { +Table::RowID HotColdTable::FindRowIDFromTimeFirstGreaterThan(Time time) const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); auto optional_row_id = cold_store_->FindRowIDFromTimeFirstGreaterThan(time); if (optional_row_id.has_value()) { @@ -385,9 +300,7 @@ Table::RowID Table::FindRowIDFromTimeFirstGreaterThan(Time time) const { return next_row_id_; } -schema::Relation Table::GetRelation() const { return rel_; } - -TableStats Table::GetTableStats() const { +TableStats HotColdTable::GetTableStats() const { TableStats info; int64_t min_time = -1; int64_t num_batches = 0; @@ -421,7 +334,7 @@ TableStats Table::GetTableStats() const { return info; } -Status Table::CompactSingleBatchUnlocked(arrow::MemoryPool*) { +Status HotColdTable::CompactSingleBatchUnlocked(arrow::MemoryPool*) { const auto& compaction_spec = batch_size_accountant_->GetNextCompactedBatchSpec(); PX_RETURN_IF_ERROR( @@ -456,7 +369,7 @@ Status Table::CompactSingleBatchUnlocked(arrow::MemoryPool*) { return Status::OK(); } -Status Table::CompactHotToCold(arrow::MemoryPool* mem_pool) { +Status HotColdTable::CompactHotToCold(arrow::MemoryPool* mem_pool) { bool next_ready = false; { absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); @@ -476,7 +389,7 @@ Status Table::CompactHotToCold(arrow::MemoryPool* mem_pool) { return Status::OK(); } -StatusOr Table::ExpireCold() { +StatusOr HotColdTable::ExpireCold() { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); if (cold_store_->Size() == 0) { return false; @@ -487,17 +400,7 @@ StatusOr Table::ExpireCold() { return true; } -Status Table::ExpireHot() { - 
absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - if (hot_store_->Size() == 0) { - return error::InvalidArgument("Failed to expire row batch, no row batches in table"); - } - hot_store_->PopFront(); - batch_size_accountant_->ExpireHotBatch(); - return Status::OK(); -} - -Status Table::ExpireBatch() { +Status HotColdTable::ExpireBatch() { PX_ASSIGN_OR_RETURN(auto expired_cold, ExpireCold()); if (expired_cold) { return Status::OK(); @@ -507,7 +410,7 @@ Status Table::ExpireBatch() { return ExpireHot(); } -Status Table::UpdateTableMetricGauges() { +Status HotColdTable::UpdateTableMetricGauges() { // Update table-level gauge values. auto stats = GetTableStats(); // Set gauge values @@ -528,5 +431,162 @@ Status Table::UpdateTableMetricGauges() { return Status::OK(); } +HotOnlyTable::HotOnlyTable(std::string_view table_name, const schema::Relation& relation, + size_t max_table_size) + : Table(TableMetrics(&(GetMetricsRegistry()), std::string(table_name)), relation, + max_table_size) { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + for (const auto& [i, col_name] : Enumerate(rel_.col_names())) { + if (col_name == "time_" && rel_.GetColumnType(i) == types::DataType::TIME64NS) { + time_col_idx_ = i; + } + } + batch_size_accountant_ = + internal::BatchSizeAccountant::Create(rel_, FLAGS_table_store_table_size_limit); + // TODO(ddelnano): Move this into the base class constructor + hot_store_ = std::make_unique>( + rel_, time_col_idx_); +} + +StatusOr> HotOnlyTable::GetNextRowBatch( + Cursor* /*cursor*/, const std::vector& cols) const { + std::vector col_types; + for (int64_t col_idx : cols) { + DCHECK(static_cast(col_idx) < rel_.NumColumns()); + col_types.push_back(rel_.col_types()[col_idx]); + } + const auto row_desc = schema::RowDescriptor(col_types); + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + if (hot_store_->Size() == 0) { + return schema::RowBatch::WithZeroRows(row_desc, /* eow */ true, + /* eos */ true); + } + auto&& batch = 
hot_store_->PopFront(); + auto batch_size = batch.Length(); + auto rb = std::make_unique(row_desc, batch_size); + batch_size_accountant_->ExpireHotBatch(); + PX_RETURN_IF_ERROR(hot_store_->AddBatchSliceToRowBatch(batch, 0, batch_size, cols, rb.get())); + return rb; +} + +Table::RowID HotOnlyTable::FirstRowID() const { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + if (hot_store_->Size() > 0) { + return hot_store_->FirstRowID(); + } + return -1; +} + +Table::RowID HotOnlyTable::LastRowID() const { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + if (hot_store_->Size() > 0) { + return hot_store_->LastRowID(); + } + return -1; +} + +Table::RowID HotOnlyTable::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + auto optional_row_id = hot_store_->FindRowIDFromTimeFirstGreaterThanOrEqual(time); + if (optional_row_id.has_value()) { + return optional_row_id.value(); + } + return next_row_id_; +} + +Table::RowID HotOnlyTable::FindRowIDFromTimeFirstGreaterThan(Time time) const { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + auto optional_row_id = hot_store_->FindRowIDFromTimeFirstGreaterThan(time); + if (optional_row_id.has_value()) { + return optional_row_id.value(); + } + return next_row_id_; +} + +Status HotOnlyTable::ToProto(table_store::schemapb::Table* table_proto) const { + CHECK(table_proto != nullptr); + std::vector col_selector; + for (int64_t i = 0; i < static_cast(rel_.NumColumns()); i++) { + col_selector.push_back(i); + } + + Cursor cursor(this); + while (!cursor.Done()) { + PX_ASSIGN_OR_RETURN(auto cur_rb, cursor.GetNextRowBatch(col_selector)); + auto eos = cursor.Done(); + cur_rb->set_eow(eos); + cur_rb->set_eos(eos); + PX_RETURN_IF_ERROR(cur_rb->ToProto(table_proto->add_row_batches())); + } + + PX_RETURN_IF_ERROR(rel_.ToProto(table_proto->mutable_relation())); + return Status::OK(); +} + +TableStats HotOnlyTable::GetTableStats() const { + 
TableStats info; + int64_t min_time = -1; + int64_t num_batches = 0; + int64_t hot_bytes = 0; + int64_t cold_bytes = 0; + { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + num_batches += hot_store_->Size(); + hot_bytes = batch_size_accountant_->HotBytes(); + if (min_time == -1) { + min_time = hot_store_->MinTime(); + } + } + absl::base_internal::SpinLockHolder lock(&stats_lock_); + + info.batches_added = batches_added_; + info.batches_expired = batches_expired_; + info.bytes_added = bytes_added_; + info.num_batches = num_batches; + info.bytes = hot_bytes + cold_bytes; + info.hot_bytes = hot_bytes; + info.cold_bytes = cold_bytes; + info.compacted_batches = compacted_batches_; + info.max_table_size = max_table_size_; + info.min_time = min_time; + + return info; +} + +Status HotOnlyTable::CompactHotToCold(arrow::MemoryPool* /*mem_pool*/) { + LOG(INFO) << "Skipping compaction for HotOnlyTable"; + return Status::OK(); +} + +Table::Time HotOnlyTable::MaxTime() const { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + if (hot_store_->Size() > 0) { + return hot_store_->MaxTime(); + } + return -1; +} + +Status HotOnlyTable::ExpireBatch() { return ExpireHot(); } + +Status HotOnlyTable::UpdateTableMetricGauges() { + // Update table-level gauge values. + auto stats = GetTableStats(); + // Set gauge values + metrics_.hot_bytes_gauge.Set(stats.hot_bytes); + metrics_.num_batches_gauge.Set(stats.num_batches); + metrics_.max_table_size_gauge.Set(stats.max_table_size); + // Compute retention gauge + int64_t current_retention_ns = 0; + // If min_time is 0, there is no data in the table. 
+ if (stats.min_time > 0) { + int64_t current_time_ns = std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + current_retention_ns = current_time_ns - stats.min_time; + } + metrics_.retention_ns_gauge.Set(current_retention_ns); + return Status::OK(); +} + } // namespace table_store } // namespace px diff --git a/src/table_store/table/table.h b/src/table_store/table/table.h index c82e9b4e6d8..a26accd562c 100644 --- a/src/table_store/table/table.h +++ b/src/table_store/table/table.h @@ -68,6 +68,316 @@ struct TableStats { int64_t min_time; }; +class Table; + +/** + * Cursor allows iterating the table, while guaranteeing that no row is returned twice (even when + * compactions occur between accesses). {Start,Stop}Spec specify what rows the cursor should begin + * and end at when iterating the cursor. + */ +class Cursor { + using Time = internal::Time; + using RowID = internal::RowID; + + public: + /** + * StartSpec defines where a Cursor should begin within the table. Current options are to start + * at a given time, or start at the first row currently in the table. + */ + struct StartSpec { + enum StartType { + StartAtTime, + CurrentStartOfTable, + }; + StartType type = CurrentStartOfTable; + Time start_time = -1; + }; + + /** + * StopSpec defines when a Cursor should stop and be considered exhausted. Current options are + * to stop at a given time, stop at the last row currently in the table, or infinite (i.e. the + * Cursor never becomes exhausted). + */ + struct StopSpec { + enum StopType { + // Iterating a StopAtTime cursor will return all records with `timestamp <= stop_time`. + // The cursor will not be considered `Done()` until a record with `timestamp > stop_time` is + // added to the table. 
+ // Note that StopAtTime is the most expensive of the StopTypes because it requires holding a + // table lock very briefly on each call to `Done()` or `NextBatchReady()` + StopAtTime, + // Iterating a StopAtTimeOrEndOfTable cursor will return all records with `timestamp <= + // stop_time` that existed in the table at the time of cursor creation. The cursor will be + // considered `Done()` once all records with `timestamp <= stop_time` have been consumed or + // when the end of the table is reached (end of the table is determined at cursor creation + // time). + StopAtTimeOrEndOfTable, + // Iterating a CurrentEndOfTable cursor will return all records in the table at cursor + // creation time. + CurrentEndOfTable, + // An Infinite cursor will never be considered `Done()`. + Infinite, + }; + StopType type = CurrentEndOfTable; + // Only valid for StopAtTime or StopAtTimeOrEndOfTable types. + Time stop_time = -1; + }; + + explicit Cursor(const Table* table) : Cursor(table, StartSpec{}, StopSpec{}) {} + Cursor(const Table* table, StartSpec start, StopSpec stop); + + // In the case of StopType == Infinite or StopType == StopAtTime, this returns whether the table + // has the next batch ready. In the case of StopType == CurrentEndOfTable, this returns !Done(). + // Note that `NextBatchReady() == true` doesn't guarantee that `GetNextRowBatch` will succeed. + // For instance, the desired row batch could have been expired between the call to + // `NextBatchReady()` and `GetNextRowBatch(...)`, and then the row batch after the expired one + // is past the stopping condition. In this case `GetNextRowBatch(...)` will return an error. + bool NextBatchReady(); + StatusOr> GetNextRowBatch(const std::vector& cols); + // In the case of StopType == Infinite, this function always returns false. + bool Done(); + // Change the StopSpec of the cursor. 
+ void UpdateStopSpec(StopSpec stop); + + private: + void AdvanceToStart(const StartSpec& start); + void StopStateFromSpec(StopSpec&& stop); + void UpdateStopStateForStopAtTime(); + + // The following methods are made private so that they are only accessible from Table. + internal::RowID* LastReadRowID(); + internal::BatchHints* Hints(); + std::optional StopRowID() const; + + struct StopState { + StopSpec spec; + RowID stop_row_id; + // If StopSpec.type is StopAtTime, then stop_row_id doesn't become finalized until the time is + // within the table. This bool keeps track of when that happens. + bool stop_row_id_final = false; + }; + const Table* table_; + internal::BatchHints hints_; + RowID last_read_row_id_; + StopState stop_; + + friend class Table; + friend class HotColdTable; + friend class HotOnlyTable; +}; + +class Table : public NotCopyable { + public: + using RecordBatchPtr = internal::RecordBatchPtr; + using ArrowArrayPtr = internal::ArrowArrayPtr; + using ColdBatch = internal::ColdBatch; + using Time = internal::Time; + using TimeInterval = internal::TimeInterval; + using RowID = internal::RowID; + using RowIDInterval = internal::RowIDInterval; + using BatchID = internal::BatchID; + + Table() = delete; + virtual ~Table() = default; + + schema::Relation GetRelation() const { return rel_; } + + /** + * Get a RowBatch of data corresponding to the next data after the given cursor. + * @param cursor the Cursor to get the next row batch after. + * @param cols a vector of column indices to get data for. + * @return a unique ptr to a RowBatch with the requested data. + */ + virtual StatusOr> GetNextRowBatch( + Cursor* cursor, const std::vector& cols) const = 0; + + /** + * Get the unique identifier of the first row in the table. + * If all the data is expired from the table, this returns the last row id that was in the table. + * @return unique identifier of the first row. 
+ */ + virtual RowID FirstRowID() const = 0; + + /** + * Get the unique identifier of the last row in the table. + * If all the data is expired from the table, this returns the last row id that was in the table. + * @return unique identifier of the last row. + */ + virtual RowID LastRowID() const = 0; + + /** + * Find the unique identifier of the first row for which its corresponding time is greater than or + * equal to the given time. + * @param time the time to search for. + * @return unique identifier of the first row with time greater than or equal to the given time. + */ + virtual RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const = 0; + + /** + * Find the unique identifier of the first row for which its corresponding time is greater than + * the given time. + * @param time the time to search for. + * @return unique identifier of the first row with time greater than the given time. + */ + virtual RowID FindRowIDFromTimeFirstGreaterThan(Time time) const = 0; + + /** + * Covert the table and store in passed in proto. + * @param table_proto The table proto to write to. + * @return Status of conversion. + */ + virtual Status ToProto(table_store::schemapb::Table* table_proto) const = 0; + + virtual TableStats GetTableStats() const = 0; + + /** + * Compacts hot batches into compacted_batch_size_ sized cold batches. Each call to + * CompactHotToCold will create a maximum of kMaxBatchesPerCompactionCall cold batches. + * @param mem_pool arrow MemoryPool to be used for creating new cold batches. + */ + virtual Status CompactHotToCold(arrow::MemoryPool* mem_pool) = 0; + + /** + * Transfers the given record batch (from Stirling) into the Table. + * + * @param record_batch the record batch to be appended to the Table. + * @return status + */ + Status TransferRecordBatch(std::unique_ptr record_batch) { + // Don't transfer over empty row batches. 
+ if (record_batch->empty() || record_batch->at(0)->Size() == 0) { + return Status::OK(); + } + + auto record_batch_w_cache = internal::RecordBatchWithCache{ + std::move(record_batch), + std::vector(rel_.NumColumns()), + std::vector(rel_.NumColumns(), false), + }; + internal::RecordOrRowBatch record_or_row_batch(std::move(record_batch_w_cache)); + + PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); + return Status::OK(); + } + + /** + * Writes a row batch to the table. + * @param rb Rowbatch to write to the table. + */ + Status WriteRowBatch(const schema::RowBatch& rb) { + // Don't write empty row batches. + if (rb.num_columns() == 0 || rb.ColumnAt(0)->length() == 0) { + return Status::OK(); + } + + internal::RecordOrRowBatch record_or_row_batch(rb); + + PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); + return Status::OK(); + } + + protected: + virtual Time MaxTime() const = 0; + + virtual Status ExpireBatch() = 0; + + virtual Status UpdateTableMetricGauges() = 0; + + Table(TableMetrics metrics, const schema::Relation& relation, size_t max_table_size) + : metrics_(metrics), rel_(relation), max_table_size_(max_table_size) {} + + Status ExpireHot() { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + if (hot_store_->Size() == 0) { + return error::InvalidArgument("Failed to expire row batch, no row batches in table"); + } + hot_store_->PopFront(); + batch_size_accountant_->ExpireHotBatch(); + return Status::OK(); + } + + Status ExpireRowBatches(int64_t row_batch_size) { + if (row_batch_size > max_table_size_) { + return error::InvalidArgument("RowBatch size ($0) is bigger than maximum table size ($1).", + row_batch_size, max_table_size_); + } + int64_t bytes; + { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); + } + while (bytes + row_batch_size > max_table_size_) { + PX_RETURN_IF_ERROR(ExpireBatch()); + { + 
absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); + } + { + absl::base_internal::SpinLockHolder lock(&stats_lock_); + batches_expired_++; + metrics_.batches_expired_counter.Increment(); + } + } + return Status::OK(); + } + + Status WriteHot(internal::RecordOrRowBatch&& record_or_row_batch) { + // See BatchSizeAccountantNonMutableState for an explanation of the thread safety and necessity + // of NonMutableState. + auto batch_stats = internal::BatchSizeAccountant::CalcBatchStats( + ABSL_TS_UNCHECKED_READ(batch_size_accountant_)->NonMutableState(), record_or_row_batch); + + PX_RETURN_IF_ERROR(ExpireRowBatches(batch_stats.bytes)); + + { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + auto batch_length = record_or_row_batch.Length(); + batch_size_accountant_->NewHotBatch(std::move(batch_stats)); + hot_store_->EmplaceBack(next_row_id_, std::move(record_or_row_batch)); + next_row_id_ += batch_length; + } + + { + absl::base_internal::SpinLockHolder lock(&stats_lock_); + ++batches_added_; + metrics_.batches_added_counter.Increment(); + bytes_added_ += batch_stats.bytes; + metrics_.bytes_added_counter.Increment(batch_stats.bytes); + } + + // Make sure locks are released for this call, since they are reacquired inside. + PX_RETURN_IF_ERROR(UpdateTableMetricGauges()); + return Status::OK(); + } + + mutable absl::base_internal::SpinLock hot_lock_; + + TableMetrics metrics_; + + schema::Relation rel_; + + int64_t max_table_size_ = 0; + + int64_t time_col_idx_ = -1; + + mutable absl::base_internal::SpinLock stats_lock_; + int64_t batches_expired_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t batches_added_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t bytes_added_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t compacted_batches_ ABSL_GUARDED_BY(stats_lock_) = 0; + + std::unique_ptr> hot_store_ + ABSL_GUARDED_BY(hot_lock_); + + // Counter to assign a unique row ID to each row. 
Synchronized by hot_lock_ since its only + // accessed on a hot write. + int64_t next_row_id_ ABSL_GUARDED_BY(hot_lock_) = 0; + + std::unique_ptr batch_size_accountant_ ABSL_GUARDED_BY(hot_lock_); + + friend class Cursor; +}; + /** * Table stores data in two separate partitions, hot and cold. Hot data is "hot" from the * perspective of writes, in other words data is first written to the hot partitiion, and then later @@ -101,7 +411,7 @@ struct TableStats { * that when GetNextRowBatch is called on the cursor it can work out that it needs to return a slice * of the batch with the original "second" batch's data. */ -class Table : public NotCopyable { +class HotColdTable : public Table { using RecordBatchPtr = internal::RecordBatchPtr; using ArrowArrayPtr = internal::ArrowArrayPtr; using ColdBatch = internal::ColdBatch; @@ -111,6 +421,9 @@ class Table : public NotCopyable { using RowIDInterval = internal::RowIDInterval; using BatchID = internal::BatchID; + // TODO(ddelnano): Maybe this should be removed + friend class Cursor; + static inline constexpr int64_t kDefaultColdBatchMinSize = 64 * 1024; public: @@ -120,100 +433,9 @@ class Table : public NotCopyable { const schema::Relation& relation) { // Create naked pointer, because std::make_shared() cannot access the private ctor. return std::shared_ptr
( - new Table(table_name, relation, FLAGS_table_store_table_size_limit)); + new HotColdTable(table_name, relation, FLAGS_table_store_table_size_limit)); } - /** - * Cursor allows iterating the table, while guaranteeing that no row is returned twice (even when - * compactions occur between accesses). {Start,Stop}Spec specify what rows the cursor should begin - * and end at when iterating the cursor. - */ - class Cursor { - public: - /** - * StartSpec defines where a Cursor should begin within the table. Current options are to start - * at a given time, or start at the first row currently in the table. - */ - struct StartSpec { - enum StartType { - StartAtTime, - CurrentStartOfTable, - }; - StartType type = CurrentStartOfTable; - Time start_time = -1; - }; - - /** - * StopSpec defines when a Cursor should stop and be considered exhausted. Current options are - * to stop at a given time, stop at the last row currently in the table, or infinite (i.e. the - * Cursor never becomes exhausted). - */ - struct StopSpec { - enum StopType { - // Iterating a StopAtTime cursor will return all records with `timestamp <= stop_time`. - // The cursor will not be considered `Done()` until a record with `timestamp > stop_time` is - // added to the table. - // Note that StopAtTime is the most expensive of the StopTypes because it requires holding a - // table lock very briefly on each call to `Done()` or `NextBatchReady()` - StopAtTime, - // Iterating a StopAtTimeOrEndOfTable cursor will return all records with `timestamp <= - // stop_time` that existed in the table at the time of cursor creation. The cursor will be - // considered `Done()` once all records with `timestamp <= stop_time` have been consumed or - // when the end of the table is reached (end of the table is determined at cursor creation - // time). - StopAtTimeOrEndOfTable, - // Iterating a CurrentEndOfTable cursor will return all records in the table at cursor - // creation time. 
- CurrentEndOfTable, - // An Infinite cursor will never be considered `Done()`. - Infinite, - }; - StopType type = CurrentEndOfTable; - // Only valid for StopAtTime or StopAtTimeOrEndOfTable types. - Time stop_time = -1; - }; - - explicit Cursor(const Table* table) : Cursor(table, StartSpec{}, StopSpec{}) {} - Cursor(const Table* table, StartSpec start, StopSpec stop); - - // In the case of StopType == Infinite or StopType == StopAtTime, this returns whether the table - // has the next batch ready. In the case of StopType == CurrentEndOfTable, this returns !Done(). - // Note that `NextBatchReady() == true` doesn't guarantee that `GetNextRowBatch` will succeed. - // For instance, the desired row batch could have been expired between the call to - // `NextBatchReady()` and `GetNextRowBatch(...)`, and then the row batch after the expired one - // is past the stopping condition. In this case `GetNextRowBatch(...)` will return an error. - bool NextBatchReady(); - StatusOr> GetNextRowBatch(const std::vector& cols); - // In the case of StopType == Infinite, this function always returns false. - bool Done(); - // Change the StopSpec of the cursor. - void UpdateStopSpec(StopSpec stop); - - private: - void AdvanceToStart(const StartSpec& start); - void StopStateFromSpec(StopSpec&& stop); - void UpdateStopStateForStopAtTime(); - - // The following methods are made private so that they are only accessible from Table. - internal::RowID* LastReadRowID(); - internal::BatchHints* Hints(); - std::optional StopRowID() const; - - struct StopState { - StopSpec spec; - RowID stop_row_id; - // If StopSpec.type is StopAtTime, then stop_row_id doesn't become finalized until the time is - // within the table. This bool keeps track of when that happens. - bool stop_row_id_final = false; - }; - const Table* table_; - internal::BatchHints hints_; - RowID last_read_row_id_; - StopState stop_; - - friend class Table; - }; - /** * @brief Construct a new Table object along with its columns. 
Can be used to create * a table (along with columns) based on a subscription message from Stirling. @@ -222,35 +444,35 @@ class Table : public NotCopyable { * @param max_table_size the maximum number of bytes that the table can hold. This is limitless * (-1) by default. */ - explicit Table(std::string_view table_name, const schema::Relation& relation, - size_t max_table_size) - : Table(table_name, relation, max_table_size, kDefaultColdBatchMinSize) {} + explicit HotColdTable(std::string_view table_name, const schema::Relation& relation, + size_t max_table_size) + : HotColdTable(table_name, relation, max_table_size, kDefaultColdBatchMinSize) {} - Table(std::string_view table_name, const schema::Relation& relation, size_t max_table_size, - size_t compacted_batch_size_); + HotColdTable(std::string_view table_name, const schema::Relation& relation, size_t max_table_size, + size_t compacted_batch_size_); /** * Get a RowBatch of data corresponding to the next data after the given cursor. - * @param cursor the Table::Cursor to get the next row batch after. + * @param cursor the Cursor to get the next row batch after. * @param cols a vector of column indices to get data for. * @return a unique ptr to a RowBatch with the requested data. */ StatusOr> GetNextRowBatch( - Cursor* cursor, const std::vector& cols) const; + Cursor* cursor, const std::vector& cols) const override; /** * Get the unique identifier of the first row in the table. * If all the data is expired from the table, this returns the last row id that was in the table. * @return unique identifier of the first row. */ - RowID FirstRowID() const; + RowID FirstRowID() const override; /** * Get the unique identifier of the last row in the table. * If all the data is expired from the table, this returns the last row id that was in the table. * @return unique identifier of the last row. 
*/ - RowID LastRowID() const; + RowID LastRowID() const override; /** * Find the unique identifier of the first row for which its corresponding time is greater than or @@ -258,7 +480,7 @@ class Table : public NotCopyable { * @param time the time to search for. * @return unique identifier of the first row with time greater than or equal to the given time. */ - RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const; + RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const override; /** * Find the unique identifier of the first row for which its corresponding time is greater than @@ -266,13 +488,7 @@ class Table : public NotCopyable { * @param time the time to search for. * @return unique identifier of the first row with time greater than the given time. */ - RowID FindRowIDFromTimeFirstGreaterThan(Time time) const; - - /** - * Writes a row batch to the table. - * @param rb Rowbatch to write to the table. - */ - Status WriteRowBatch(const schema::RowBatch& rb); + RowID FindRowIDFromTimeFirstGreaterThan(Time time) const override; /** * Transfers the given record batch (from Stirling) into the Table. @@ -280,68 +496,139 @@ class Table : public NotCopyable { * @param record_batch the record batch to be appended to the Table. * @return status */ - Status TransferRecordBatch(std::unique_ptr record_batch); - - schema::Relation GetRelation() const; - StatusOr> GetTableAsRecordBatches() const; + /* Status TransferRecordBatch(std::unique_ptr record_batch) + * override; */ /** * Covert the table and store in passed in proto. * @param table_proto The table proto to write to. * @return Status of conversion. */ - Status ToProto(table_store::schemapb::Table* table_proto) const; + Status ToProto(table_store::schemapb::Table* table_proto) const override; - TableStats GetTableStats() const; + TableStats GetTableStats() const override; /** * Compacts hot batches into compacted_batch_size_ sized cold batches. 
Each call to * CompactHotToCold will create a maximum of kMaxBatchesPerCompactionCall cold batches. * @param mem_pool arrow MemoryPool to be used for creating new cold batches. */ - Status CompactHotToCold(arrow::MemoryPool* mem_pool); + Status CompactHotToCold(arrow::MemoryPool* mem_pool) override; private: - TableMetrics metrics_; + Time MaxTime() const override; - schema::Relation rel_; + Status ExpireBatch() override; + + Status UpdateTableMetricGauges() override; - mutable absl::base_internal::SpinLock stats_lock_; - int64_t batches_expired_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t batches_added_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t bytes_added_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t compacted_batches_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t max_table_size_ = 0; const int64_t compacted_batch_size_; - mutable absl::base_internal::SpinLock hot_lock_; - std::unique_ptr> hot_store_ - ABSL_GUARDED_BY(hot_lock_); mutable absl::base_internal::SpinLock cold_lock_; std::unique_ptr> cold_store_ ABSL_GUARDED_BY(cold_lock_); std::deque cold_batch_bytes_ ABSL_GUARDED_BY(cold_lock_); - // Counter to assign a unique row ID to each row. Synchronized by hot_lock_ since its only - // accessed on a hot write. 
- int64_t next_row_id_ ABSL_GUARDED_BY(hot_lock_) = 0; - int64_t time_col_idx_ = -1; - - Status WriteHot(internal::RecordOrRowBatch&& record_or_row_batch); - - Status ExpireBatch(); - Status ExpireHot(); StatusOr ExpireCold(); - Status ExpireRowBatches(int64_t row_batch_size); Status CompactSingleBatchUnlocked(arrow::MemoryPool* mem_pool) ABSL_EXCLUSIVE_LOCKS_REQUIRED(cold_lock_) ABSL_EXCLUSIVE_LOCKS_REQUIRED(hot_lock_); - Status UpdateTableMetricGauges(); - Time MaxTime() const; + internal::ArrowArrayCompactor compactor_; +}; - std::unique_ptr batch_size_accountant_ ABSL_GUARDED_BY(hot_lock_); +class HotOnlyTable : public Table { + using RowID = internal::RowID; - internal::ArrowArrayCompactor compactor_; + public: + using StopPosition = int64_t; + static inline std::shared_ptr
Create(std::string_view table_name, + const schema::Relation& relation) { + // Create naked pointer, because std::make_shared() cannot access the private ctor. + return std::shared_ptr
( + new HotOnlyTable(table_name, relation, FLAGS_table_store_table_size_limit)); + } + + /** + * @brief Construct a new Table object along with its columns. Can be used to create + * a table (along with columns) based on a subscription message from Stirling. + * + * @param relation the relation for the table. + * @param max_table_size the maximum number of bytes that the table can hold. This is limitless + * (-1) by default. + */ + explicit HotOnlyTable(std::string_view table_name, const schema::Relation& relation, + size_t max_table_size); + + /** + * Get a RowBatch of data corresponding to the next data after the given cursor. + * @param cursor the Cursor to get the next row batch after. + * @param cols a vector of column indices to get data for. + * @return a unique ptr to a RowBatch with the requested data. + */ + StatusOr> GetNextRowBatch( + Cursor* cursor, const std::vector& cols) const override; + + /** + * Get the unique identifier of the first row in the table. + * If all the data is expired from the table, this returns the last row id that was in the table. + * @return unique identifier of the first row. + */ + RowID FirstRowID() const override; + + /** + * Get the unique identifier of the last row in the table. + * If all the data is expired from the table, this returns the last row id that was in the table. + * @return unique identifier of the last row. + */ + RowID LastRowID() const override; + + /** + * Find the unique identifier of the first row for which its corresponding time is greater than or + * equal to the given time. + * @param time the time to search for. + * @return unique identifier of the first row with time greater than or equal to the given time. + */ + RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const override; + + /** + * Find the unique identifier of the first row for which its corresponding time is greater than + * the given time. + * @param time the time to search for. 
+ * @return unique identifier of the first row with time greater than the given time. + */ + RowID FindRowIDFromTimeFirstGreaterThan(Time time) const override; + + /** + * Transfers the given record batch (from Stirling) into the Table. + * + * @param record_batch the record batch to be appended to the Table. + * @return status + */ + /* Status TransferRecordBatch(std::unique_ptr record_batch) + * override; */ + + /** + * Covert the table and store in passed in proto. + * @param table_proto The table proto to write to. + * @return Status of conversion. + */ + Status ToProto(table_store::schemapb::Table* table_proto) const override; + + TableStats GetTableStats() const override; + + /** + * Compacts hot batches into compacted_batch_size_ sized cold batches. Each call to + * CompactHotToCold will create a maximum of kMaxBatchesPerCompactionCall cold batches. + * @param mem_pool arrow MemoryPool to be used for creating new cold batches. + */ + Status CompactHotToCold(arrow::MemoryPool* mem_pool) override; + + private: + Time MaxTime() const override; + + Status ExpireBatch() override; + + Status UpdateTableMetricGauges() override; friend class Cursor; }; diff --git a/src/table_store/table/table_benchmark.cc b/src/table_store/table/table_benchmark.cc index 8c65271fe2e..22eb1357a16 100644 --- a/src/table_store/table/table_benchmark.cc +++ b/src/table_store/table/table_benchmark.cc @@ -34,7 +34,7 @@ static inline std::unique_ptr
MakeTable(int64_t max_size, int64_t compact schema::Relation rel( std::vector({types::DataType::TIME64NS, types::DataType::FLOAT64}), std::vector({"time_", "float"})); - return std::make_unique
("test_table", rel, max_size, compaction_size); + return std::make_unique("test_table", rel, max_size, compaction_size); } static inline std::unique_ptr MakeHotBatch(int64_t batch_size, @@ -82,7 +82,7 @@ static inline int64_t FillTableCold(Table* table, int64_t table_size, int64_t ba return time_counter; } -static inline void ReadFullTable(Table::Cursor* cursor) { +static inline void ReadFullTable(Cursor* cursor) { while (!cursor->Done()) { benchmark::DoNotOptimize(cursor->GetNextRowBatch({0, 1})); } @@ -98,14 +98,14 @@ static void BM_TableReadAllHot(benchmark::State& state) { CHECK_EQ(table->GetTableStats().bytes, table_size); - Table::Cursor cursor(table.get()); + Cursor cursor(table.get()); for (auto _ : state) { ReadFullTable(&cursor); state.PauseTiming(); table = MakeTable(table_size, compaction_size); FillTableHot(table.get(), table_size, batch_length); - cursor = Table::Cursor(table.get()); + cursor = Cursor(table.get()); state.ResumeTiming(); } @@ -120,25 +120,25 @@ static void BM_TableReadAllCold(benchmark::State& state) { auto table = MakeTable(table_size, compaction_size); FillTableCold(table.get(), table_size, batch_length); CHECK_EQ(table->GetTableStats().bytes, table_size); - Table::Cursor cursor(table.get()); + Cursor cursor(table.get()); for (auto _ : state) { ReadFullTable(&cursor); state.PauseTiming(); - cursor = Table::Cursor(table.get()); + cursor = Cursor(table.get()); state.ResumeTiming(); } state.SetBytesProcessed(state.iterations() * table_size); } -Table::Cursor GetLastBatchCursor(Table* table, int64_t last_time, int64_t batch_length, - const std::vector& cols) { - Table::Cursor cursor(table, - Table::Cursor::StartSpec{Table::Cursor::StartSpec::StartType::StartAtTime, - last_time - 2 * batch_length}, - Table::Cursor::StopSpec{}); +Cursor GetLastBatchCursor(Table* table, int64_t last_time, int64_t batch_length, + const std::vector& cols) { + Cursor cursor( + table, + Cursor::StartSpec{Cursor::StartSpec::StartType::StartAtTime, last_time - 2 
* batch_length}, + Cursor::StopSpec{}); // Advance the cursor so that it points to the last batch and has BatchHints set. cursor.GetNextRowBatch(cols); return cursor; @@ -238,7 +238,7 @@ static void BM_TableWriteFull(benchmark::State& state) { // NOLINTNEXTLINE : runtime/references. static void BM_TableCompaction(benchmark::State& state) { int64_t compaction_size = 64 * 1024; - int64_t table_size = Table::kMaxBatchesPerCompactionCall * compaction_size; + int64_t table_size = HotColdTable::kMaxBatchesPerCompactionCall * compaction_size; int64_t batch_length = 256; auto table = MakeTable(table_size, compaction_size); // Fill table first to make sure each compaction hits kMaxBatchesPerCompaction. @@ -254,7 +254,7 @@ static void BM_TableCompaction(benchmark::State& state) { } state.SetBytesProcessed(state.iterations() * compaction_size * - Table::kMaxBatchesPerCompactionCall); + HotColdTable::kMaxBatchesPerCompactionCall); } // NOLINTNEXTLINE : runtime/references. @@ -262,7 +262,7 @@ static void BM_TableThreaded(benchmark::State& state) { schema::Relation rel({types::DataType::TIME64NS}, {"time_"}); schema::RowDescriptor rd({types::DataType::TIME64NS}); std::shared_ptr
table_ptr = - std::make_shared
("test_table", rel, 16 * 1024 * 1024, 5 * 1024); + std::make_shared("test_table", rel, 16 * 1024 * 1024, 5 * 1024); int64_t batch_size = 1024; int64_t num_batches = 16 * 1024; @@ -309,7 +309,7 @@ static void BM_TableThreaded(benchmark::State& state) { int64_t batch_counter = 0; while (batch_counter < (num_batches / num_read_threads)) { - Table::Cursor cursor(table_ptr.get()); + Cursor cursor(table_ptr.get()); auto start = std::chrono::high_resolution_clock::now(); auto batch_or_s = cursor.GetNextRowBatch({0}); auto end = std::chrono::high_resolution_clock::now(); diff --git a/src/table_store/table/table_store.cc b/src/table_store/table/table_store.cc index e7ed6319b87..3bcdacbc69d 100644 --- a/src/table_store/table/table_store.cc +++ b/src/table_store/table/table_store.cc @@ -43,7 +43,7 @@ StatusOr TableStore::CreateNewTablet(uint64_t table_id, const types::Tab const TableInfo& table_info = id_to_table_info_map_iter->second; const schema::Relation& relation = table_info.relation; - std::shared_ptr
new_tablet = Table::Create(table_info.table_name, relation); + std::shared_ptr
new_tablet = HotColdTable::Create(table_info.table_name, relation); TableIDTablet id_key = {table_id, tablet_id}; id_to_table_map_[id_key] = new_tablet; diff --git a/src/table_store/table/table_store_test.cc b/src/table_store/table/table_store_test.cc index 8bd3ca761cc..8e43d3f0fdd 100644 --- a/src/table_store/table/table_store_test.cc +++ b/src/table_store/table/table_store_test.cc @@ -42,8 +42,8 @@ class TableStoreTest : public ::testing::Test { rel2 = schema::Relation({types::DataType::INT64, types::DataType::FLOAT64, types::DataType::INT64}, {"table2col1", "table2col2", "table2col3"}); - table1 = Table::Create("test_table1", rel1); - table2 = Table::Create("test_table2", rel2); + table1 = HotColdTable::Create("test_table1", rel1); + table2 = HotColdTable::Create("test_table2", rel2); } std::unique_ptr MakeRel1ColumnWrapperBatch() { @@ -208,9 +208,9 @@ class TableStoreTabletsTest : public TableStoreTest { protected: void SetUp() override { TableStoreTest::SetUp(); - tablet1_1 = Table::Create("test_table1", rel1); - tablet1_2 = Table::Create("test_table1", rel1); - tablet2_1 = Table::Create("test_table2", rel2); + tablet1_1 = HotColdTable::Create("test_table1", rel1); + tablet1_2 = HotColdTable::Create("test_table1", rel1); + tablet2_1 = HotColdTable::Create("test_table2", rel2); } std::shared_ptr
tablet1_1; diff --git a/src/table_store/table/table_test.cc b/src/table_store/table/table_test.cc index 10d1eed6b44..6a2617098e6 100644 --- a/src/table_store/table/table_test.cc +++ b/src/table_store/table/table_test.cc @@ -37,7 +37,28 @@ namespace { // TOOD(zasgar): deduplicate this with exec/test_utils. std::shared_ptr
TestTable() { schema::Relation rel({types::DataType::FLOAT64, types::DataType::INT64}, {"col1", "col2"}); - auto table = Table::Create("test_table", rel); + auto table = HotColdTable::Create("test_table", rel); + + auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); + std::vector col1_in1 = {0.5, 1.2, 5.3}; + std::vector col2_in1 = {1, 2, 3}; + PX_CHECK_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + PX_CHECK_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + PX_CHECK_OK(table->WriteRowBatch(rb1)); + + auto rb2 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 2); + std::vector col1_in2 = {0.1, 5.1}; + std::vector col2_in2 = {5, 6}; + PX_CHECK_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); + PX_CHECK_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); + PX_CHECK_OK(table->WriteRowBatch(rb2)); + + return table; +} + +std::shared_ptr
HotOnlyTestTable() { + schema::Relation rel({types::DataType::FLOAT64, types::DataType::INT64}, {"col1", "col2"}); + auto table = HotOnlyTable::Create("test_table", rel); auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {0.5, 1.2, 5.3}; @@ -61,7 +82,42 @@ std::shared_ptr
TestTable() { TEST(TableTest, basic_test) { schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - std::shared_ptr
table_ptr = Table::Create("test_table", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); + Table& table = *table_ptr; + + auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); + std::vector col1_in1 = {true, false, true}; + std::vector col2_in1 = {1, 2, 3}; + EXPECT_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + EXPECT_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + EXPECT_OK(table.WriteRowBatch(rb1)); + + auto rb2 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 2); + std::vector col1_in2 = {false, false}; + std::vector col2_in2 = {5, 6}; + EXPECT_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); + EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); + EXPECT_OK(table.WriteRowBatch(rb2)); + + Cursor cursor(table_ptr.get()); + + auto actual_rb1 = cursor.GetNextRowBatch(std::vector({0, 1})).ConsumeValueOrDie(); + EXPECT_TRUE( + actual_rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + EXPECT_TRUE( + actual_rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + + auto actual_rb2 = cursor.GetNextRowBatch(std::vector({0, 1})).ConsumeValueOrDie(); + EXPECT_TRUE( + actual_rb2->ColumnAt(0)->Equals(types::ToArrow(col1_in2, arrow::default_memory_pool()))); + EXPECT_TRUE( + actual_rb2->ColumnAt(1)->Equals(types::ToArrow(col2_in2, arrow::default_memory_pool()))); +} + +TEST(TableTest, HotOnlyTable_basic_test) { + schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); + + std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); Table& table = *table_ptr; auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); @@ -78,7 +134,7 @@ TEST(TableTest, basic_test) { EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); EXPECT_OK(table.WriteRowBatch(rb2)); - Table::Cursor cursor(table_ptr.get()); + Cursor cursor(table_ptr.get()); auto actual_rb1 = cursor.GetNextRowBatch(std::vector({0, 1})).ConsumeValueOrDie(); EXPECT_TRUE( @@ -97,7 +153,60 @@ TEST(TableTest, bytes_test) { auto rd = schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); schema::Relation rel(rd.types(), {"col1", "col2"}); - std::shared_ptr
table_ptr = Table::Create("test_table", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); + Table& table = *table_ptr; + + schema::RowBatch rb1(rd, 3); + std::vector col1_rb1 = {4, 5, 10}; + std::vector col2_rb1 = {"hello", "abc", "defg"}; + auto col1_rb1_arrow = types::ToArrow(col1_rb1, arrow::default_memory_pool()); + auto col2_rb1_arrow = types::ToArrow(col2_rb1, arrow::default_memory_pool()); + EXPECT_OK(rb1.AddColumn(col1_rb1_arrow)); + EXPECT_OK(rb1.AddColumn(col2_rb1_arrow)); + int64_t rb1_size = 3 * sizeof(int64_t) + 12 * sizeof(char) + 3 * sizeof(uint32_t); + + EXPECT_OK(table.WriteRowBatch(rb1)); + EXPECT_EQ(table.GetTableStats().bytes, rb1_size); + + schema::RowBatch rb2(rd, 2); + std::vector col1_rb2 = {4, 5}; + std::vector col2_rb2 = {"a", "bc"}; + auto col1_rb2_arrow = types::ToArrow(col1_rb2, arrow::default_memory_pool()); + auto col2_rb2_arrow = types::ToArrow(col2_rb2, arrow::default_memory_pool()); + EXPECT_OK(rb2.AddColumn(col1_rb2_arrow)); + EXPECT_OK(rb2.AddColumn(col2_rb2_arrow)); + int64_t rb2_size = 2 * sizeof(int64_t) + 3 * sizeof(char) + 2 * sizeof(uint32_t); + + EXPECT_OK(table.WriteRowBatch(rb2)); + EXPECT_EQ(table.GetTableStats().bytes, rb1_size + rb2_size); + + std::vector time_hot_col1 = {1, 5, 3}; + std::vector time_hot_col2 = {"test", "abc", "de"}; + auto wrapper_batch_1 = std::make_unique(); + auto col_wrapper_1 = std::make_shared(3); + col_wrapper_1->Clear(); + for (const auto& num : time_hot_col1) { + col_wrapper_1->Append(num); + } + auto col_wrapper_2 = std::make_shared(3); + col_wrapper_2->Clear(); + for (const auto& num : time_hot_col2) { + col_wrapper_2->Append(num); + } + wrapper_batch_1->push_back(col_wrapper_1); + wrapper_batch_1->push_back(col_wrapper_2); + int64_t rb3_size = 3 * sizeof(int64_t) + 9 * sizeof(char) + 3 * sizeof(uint32_t); + + EXPECT_OK(table.TransferRecordBatch(std::move(wrapper_batch_1))); + + EXPECT_EQ(table.GetTableStats().bytes, rb1_size + rb2_size + rb3_size); +} + +TEST(TableTest, HotOnlyTable_bytes_test) { + auto rd = 
schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); + schema::Relation rel(rd.types(), {"col1", "col2"}); + + std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); Table& table = *table_ptr; schema::RowBatch rb1(rd, 3); @@ -188,7 +297,7 @@ TEST(TableTest, bytes_test_w_compaction) { // Make minimum batch size rb1_size + rb2_size so that compaction causes 2 of the 3 batches to // be compacted into cold. std::shared_ptr
table_ptr = - std::make_shared
("test_table", rel, 128 * 1024, rb1_size + rb2_size); + std::make_shared("test_table", rel, 128 * 1024, rb1_size + rb2_size); Table& table = *table_ptr; EXPECT_OK(table.WriteRowBatch(rb1)); @@ -208,7 +317,7 @@ TEST(TableTest, expiry_test) { auto rd = schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); schema::Relation rel(rd.types(), {"col1", "col2"}); - Table table("test_table", rel, 80); + HotColdTable table("test_table", rel, 80); schema::RowBatch rb1(rd, 3); std::vector col1_rb1 = {4, 5, 10}; @@ -354,7 +463,7 @@ TEST(TableTest, expiry_test_w_compaction) { wrapper_batch_1_2->push_back(col_wrapper_2_2); int64_t rb5_size = 5 * sizeof(int64_t) + 20 * sizeof(char) + 5 * sizeof(uint32_t); - Table table("test_table", rel, 80, 40); + HotColdTable table("test_table", rel, 80, 40); EXPECT_OK(table.WriteRowBatch(rb1)); EXPECT_EQ(table.GetTableStats().bytes, rb1_size); @@ -376,7 +485,7 @@ TEST(TableTest, batch_size_too_big) { auto rd = schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); schema::Relation rel(rd.types(), {"col1", "col2"}); - Table table("test_table", rel, 10); + HotColdTable table("test_table", rel, 10); schema::RowBatch rb1(rd, 3); std::vector col1_rb1 = {4, 5, 10}; @@ -394,7 +503,7 @@ TEST(TableTest, write_row_batch) { auto rd = schema::RowDescriptor({types::DataType::BOOLEAN, types::DataType::INT64}); schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - std::shared_ptr
table_ptr = Table::Create("test_table", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); Table& table = *table_ptr; schema::RowBatch rb1(rd, 2); @@ -407,7 +516,32 @@ TEST(TableTest, write_row_batch) { EXPECT_OK(table.WriteRowBatch(rb1)); - Table::Cursor cursor(table_ptr.get()); + Cursor cursor(table_ptr.get()); + auto rb_or_s = cursor.GetNextRowBatch({0, 1}); + ASSERT_OK(rb_or_s); + auto actual_rb = rb_or_s.ConsumeValueOrDie(); + EXPECT_TRUE(actual_rb->ColumnAt(0)->Equals(col1_rb1_arrow)); + EXPECT_TRUE(actual_rb->ColumnAt(1)->Equals(col2_rb1_arrow)); +} + +TEST(TableTest, HotOnlyTable_write_row_batch) { + auto rd = schema::RowDescriptor({types::DataType::BOOLEAN, types::DataType::INT64}); + schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); + + std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); + Table& table = *table_ptr; + + schema::RowBatch rb1(rd, 2); + std::vector col1_rb1 = {true, false}; + std::vector col2_rb1 = {1, 2}; + auto col1_rb1_arrow = types::ToArrow(col1_rb1, arrow::default_memory_pool()); + auto col2_rb1_arrow = types::ToArrow(col2_rb1, arrow::default_memory_pool()); + EXPECT_OK(rb1.AddColumn(col1_rb1_arrow)); + EXPECT_OK(rb1.AddColumn(col2_rb1_arrow)); + + EXPECT_OK(table.WriteRowBatch(rb1)); + + Cursor cursor(table_ptr.get()); auto rb_or_s = cursor.GetNextRowBatch({0, 1}); ASSERT_OK(rb_or_s); auto actual_rb = rb_or_s.ConsumeValueOrDie(); @@ -418,7 +552,48 @@ TEST(TableTest, write_row_batch) { TEST(TableTest, hot_batches_test) { schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - std::shared_ptr
table_ptr = Table::Create("table_name", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("table_name", rel); + Table& table = *table_ptr; + + std::vector col1_in1 = {true, false, true}; + auto col1_in1_wrapper = + types::ColumnWrapper::FromArrow(types::ToArrow(col1_in1, arrow::default_memory_pool())); + std::vector col1_in2 = {false, false}; + auto col1_in2_wrapper = + types::ColumnWrapper::FromArrow(types::ToArrow(col1_in2, arrow::default_memory_pool())); + + std::vector col2_in1 = {1, 2, 3}; + auto col2_in1_wrapper = + types::ColumnWrapper::FromArrow(types::ToArrow(col2_in1, arrow::default_memory_pool())); + std::vector col2_in2 = {5, 6}; + auto col2_in2_wrapper = + types::ColumnWrapper::FromArrow(types::ToArrow(col2_in2, arrow::default_memory_pool())); + + auto rb_wrapper_1 = std::make_unique(); + rb_wrapper_1->push_back(col1_in1_wrapper); + rb_wrapper_1->push_back(col2_in1_wrapper); + EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_1))); + + auto rb_wrapper_2 = std::make_unique(); + rb_wrapper_2->push_back(col1_in2_wrapper); + rb_wrapper_2->push_back(col2_in2_wrapper); + EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); + + Cursor cursor(table_ptr.get()); + auto rb1 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); + EXPECT_TRUE(rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); + EXPECT_TRUE(rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); + + auto rb2 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); + ASSERT_NE(rb2, nullptr); + EXPECT_TRUE(rb2->ColumnAt(0)->Equals(types::ToArrow(col1_in2, arrow::default_memory_pool()))); + EXPECT_TRUE(rb2->ColumnAt(1)->Equals(types::ToArrow(col2_in2, arrow::default_memory_pool()))); +} + +TEST(TableTest, HotOnlyTable_hot_batches_test) { + schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); + + std::shared_ptr
table_ptr = HotOnlyTable::Create("table_name", rel); Table& table = *table_ptr; std::vector col1_in1 = {true, false, true}; @@ -445,7 +620,7 @@ TEST(TableTest, hot_batches_test) { rb_wrapper_2->push_back(col2_in2_wrapper); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - Table::Cursor cursor(table_ptr.get()); + Cursor cursor(table_ptr.get()); auto rb1 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); EXPECT_TRUE(rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); EXPECT_TRUE(rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); @@ -482,12 +657,12 @@ TEST(TableTest, hot_batches_w_compaction_test) { rb_wrapper_2->push_back(col1_in2_wrapper); rb_wrapper_2->push_back(col2_in2_wrapper); - Table table("test_table", rel, 128 * 1024, rb1_size); + HotColdTable table("test_table", rel, 128 * 1024, rb1_size); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_1))); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - Table::Cursor cursor(&table); + Cursor cursor(&table); auto rb1 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); EXPECT_TRUE(rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); EXPECT_TRUE(rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); @@ -502,7 +677,7 @@ TEST(TableTest, hot_batches_w_compaction_test) { TEST(TableTest, find_rowid_from_time_first_greater_than_or_equal) { schema::Relation rel(std::vector({types::DataType::TIME64NS}), std::vector({"time_"})); - std::shared_ptr
table_ptr = Table::Create("test_table", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); Table& table = *table_ptr; std::vector time_batch_1 = {2, 3, 4, 6}; @@ -579,7 +754,7 @@ TEST(TableTest, find_rowid_from_time_first_greater_than_or_equal_with_compaction schema::Relation rel(std::vector({types::DataType::TIME64NS}), std::vector({"time_"})); int64_t compaction_size = 4 * sizeof(int64_t); - Table table("test_table", rel, 128 * 1024, compaction_size); + HotColdTable table("test_table", rel, 128 * 1024, compaction_size); std::vector time_batch_1 = {2, 3, 4, 6}; std::vector time_batch_2 = {8, 8, 8}; @@ -717,11 +892,96 @@ TEST(TableTest, ToProto) { EXPECT_TRUE(differ.Compare(expected_proto, table_proto)); } +// TODO(ddelnano): Not sure if this matters since I believe StopSpec::Inifinite will hit +// an error for this ToProto test. +TEST(TableTest, DISABLED_HotOnlyTable_ToProto) { + auto table = HotOnlyTestTable(); + table_store::schemapb::Table table_proto; + EXPECT_OK(table->ToProto(&table_proto)); + + std::string expected = R"( + relation { + columns { + column_name: "col1" + column_type: FLOAT64 + column_semantic_type: ST_NONE + } + columns { + column_name: "col2" + column_type: INT64 + column_semantic_type: ST_NONE + } + } + row_batches { + cols { + float64_data { + data: 0.5 + data: 1.2 + data: 5.3 + } + } + cols { + int64_data { + data: 1 + data: 2 + data: 3 + } + } + eow: false + eos: false + num_rows: 3 + } + row_batches { + cols { + float64_data { + data: 0.1 + data: 5.1 + } + } + cols { + int64_data { + data: 5 + data: 6 + } + } + eow: true + eos: true + num_rows: 2 + })"; + + google::protobuf::util::MessageDifferencer differ; + table_store::schemapb::Table expected_proto; + ASSERT_TRUE(google::protobuf::TextFormat::MergeFromString(expected, &expected_proto)); + EXPECT_TRUE(differ.Compare(expected_proto, table_proto)); +} + TEST(TableTest, transfer_empty_record_batch_test) { schema::Relation rel({types::DataType::INT64}, {"col1"}); schema::RowDescriptor rd({types::DataType::INT64}); - 
std::shared_ptr
table_ptr = Table::Create("test_table", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); + Table& table = *table_ptr; + + // ColumnWrapper with no columns should not be added to row batches. + auto wrapper_batch_1 = std::make_unique(); + EXPECT_OK(table.TransferRecordBatch(std::move(wrapper_batch_1))); + + EXPECT_EQ(table.GetTableStats().batches_added, 0); + + // Column wrapper with empty columns should not be added to row batches. + auto wrapper_batch_2 = std::make_unique(); + auto col_wrapper_2 = std::make_shared(0); + wrapper_batch_2->push_back(col_wrapper_2); + EXPECT_OK(table.TransferRecordBatch(std::move(wrapper_batch_2))); + + EXPECT_EQ(table.GetTableStats().batches_added, 0); +} + +TEST(TableTest, HotOnlyTable_transfer_empty_record_batch_test) { + schema::Relation rel({types::DataType::INT64}, {"col1"}); + schema::RowDescriptor rd({types::DataType::INT64}); + + std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); Table& table = *table_ptr; // ColumnWrapper with no columns should not be added to row batches. @@ -743,7 +1003,22 @@ TEST(TableTest, write_zero_row_row_batch) { schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); schema::RowDescriptor rd({types::DataType::BOOLEAN, types::DataType::INT64}); - std::shared_ptr
table_ptr = Table::Create("test_table", rel); + std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); + + auto result = schema::RowBatch::WithZeroRows(rd, /*eow*/ false, /*eos*/ false); + ASSERT_OK(result); + auto rb_ptr = result.ConsumeValueOrDie(); + + EXPECT_OK(table_ptr->WriteRowBatch(*rb_ptr)); + // Row batch with 0 rows won't be written. + EXPECT_EQ(table_ptr->GetTableStats().batches_added, 0); +} + +TEST(TableTest, HotOnlyTable_write_zero_row_row_batch) { + schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); + schema::RowDescriptor rd({types::DataType::BOOLEAN, types::DataType::INT64}); + + std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); auto result = schema::RowBatch::WithZeroRows(rd, /*eow*/ false, /*eos*/ false); ASSERT_OK(result); @@ -771,7 +1046,7 @@ TEST(TableTest, threaded) { schema::Relation rel({types::DataType::TIME64NS}, {"time_"}); schema::RowDescriptor rd({types::DataType::TIME64NS}); std::shared_ptr
table_ptr = - std::make_shared
("test_table", rel, 8 * 1024 * 1024, 5 * 1024); + std::make_shared("test_table", rel, 8 * 1024 * 1024, 5 * 1024); int64_t max_time_counter = 1024 * 1024; @@ -786,8 +1061,8 @@ TEST(TableTest, threaded) { }); // Create the cursor before the write thread starts, to ensure that we get every row of the table. - Table::Cursor cursor(table_ptr.get(), Table::Cursor::StartSpec{}, - Table::Cursor::StopSpec{Table::Cursor::StopSpec::StopType::Infinite}); + Cursor cursor(table_ptr.get(), Cursor::StartSpec{}, + Cursor::StopSpec{Cursor::StopSpec::StopType::Infinite}); std::thread writer_thread([table_ptr, done, max_time_counter]() { std::default_random_engine gen; @@ -844,7 +1119,7 @@ TEST(TableTest, threaded) { } // Now that the writer is finished move the stop of the cursor to the current end of the table. - cursor.UpdateStopSpec(Table::Cursor::StopSpec{Table::Cursor::StopSpec::CurrentEndOfTable}); + cursor.UpdateStopSpec(Cursor::StopSpec{Cursor::StopSpec::CurrentEndOfTable}); // Once the writer is finished, we loop over the remaining data in the table. while (time_counter < max_time_counter && !cursor.Done()) { @@ -872,7 +1147,7 @@ TEST(TableTest, NextBatch_generation_bug) { schema::Relation rel(rd.types(), {"col1", "col2"}); int64_t rb1_size = 3 * sizeof(int64_t) + 12 * sizeof(char) + 3 * sizeof(uint32_t); - Table table("test_table", rel, rb1_size, rb1_size); + HotColdTable table("test_table", rel, rb1_size, rb1_size); schema::RowBatch rb1(rd, 3); std::vector col1_rb1 = {4, 5, 10}; @@ -885,7 +1160,7 @@ TEST(TableTest, NextBatch_generation_bug) { EXPECT_OK(table.WriteRowBatch(rb1)); EXPECT_OK(table.CompactHotToCold(arrow::default_memory_pool())); - Table::Cursor cursor(&table, Table::Cursor::StartSpec{}, Table::Cursor::StopSpec{}); + Cursor cursor(&table, Cursor::StartSpec{}, Cursor::StopSpec{}); // Force cold expiration. EXPECT_OK(table.WriteRowBatch(rb1)); // GetNextRowBatch should return invalidargument since the batch was expired. 
@@ -919,12 +1194,12 @@ TEST(TableTest, GetNextRowBatch_after_expiry) { rb_wrapper_2->push_back(col2_in2_wrapper); int64_t rb2_size = 2 * sizeof(bool) + 2 * sizeof(int64_t); - Table table("test_table", rel, rb1_size + rb2_size, rb1_size); + HotColdTable table("test_table", rel, rb1_size + rb2_size, rb1_size); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_1))); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - Table::Cursor cursor(&table); + Cursor cursor(&table); // This write will expire the first batch. auto rb_wrapper_1_copy = std::make_unique(); @@ -942,8 +1217,8 @@ TEST(TableTest, GetNextRowBatch_after_expiry) { struct CursorTestCase { std::string name; std::vector> initial_time_batches; - Table::Cursor::StartSpec start_spec; - Table::Cursor::StopSpec stop_spec; + Cursor::StartSpec start_spec; + Cursor::StopSpec stop_spec; struct Action { enum ActionType { ExpectBatch, @@ -965,14 +1240,14 @@ class CursorTableTest : public ::testing::Test, rel_ = std::make_unique(std::vector{types::TIME64NS}, std::vector{"time_"}); - table_ptr_ = Table::Create("test_table", *rel_); + table_ptr_ = HotColdTable::Create("test_table", *rel_); for (const auto& batch : test_case_.initial_time_batches) { WriteBatch(batch); } - cursor_ = std::make_unique(table_ptr_.get(), test_case_.start_spec, - test_case_.stop_spec); + cursor_ = + std::make_unique(table_ptr_.get(), test_case_.start_spec, test_case_.stop_spec); } void WriteBatch(const std::vector& times) { @@ -1002,7 +1277,7 @@ class CursorTableTest : public ::testing::Test, CursorTestCase test_case_; std::unique_ptr rel_; std::shared_ptr
table_ptr_; - std::unique_ptr cursor_; + std::unique_ptr cursor_; }; TEST_P(CursorTableTest, cursor_test) { @@ -1021,8 +1296,8 @@ TEST_P(CursorTableTest, cursor_test) { } } -using StartType = Table::Cursor::StartSpec::StartType; -using StopType = Table::Cursor::StopSpec::StopType; +using StartType = Cursor::StartSpec::StartType; +using StopType = Cursor::StopSpec::StopType; INSTANTIATE_TEST_SUITE_P(CursorTableTestSuite, CursorTableTest, ::testing::ValuesIn(std::vector{ diff --git a/src/table_store/table/tablets_group.cc b/src/table_store/table/tablets_group.cc index adf6e0f961a..9c35d0a9bbb 100644 --- a/src/table_store/table/tablets_group.cc +++ b/src/table_store/table/tablets_group.cc @@ -24,7 +24,7 @@ namespace table_store { void TabletsGroup::CreateTablet(const types::TabletID& tablet_id) { LOG_IF(DFATAL, HasTablet(tablet_id)) << absl::Substitute("Tablet with id $0 already exists in Table.", tablet_id); - tablet_id_to_tablet_map_[tablet_id] = Table::Create(tablet_id, relation_); + tablet_id_to_tablet_map_[tablet_id] = HotColdTable::Create(tablet_id, relation_); } void TabletsGroup::AddTablet(const types::TabletID& tablet_id, std::shared_ptr
tablet) { diff --git a/src/table_store/table/tablets_group_test.cc b/src/table_store/table/tablets_group_test.cc index a9ec26ba7da..6d34aac5637 100644 --- a/src/table_store/table/tablets_group_test.cc +++ b/src/table_store/table/tablets_group_test.cc @@ -40,8 +40,8 @@ class TabletsGroupTest : public ::testing::Test { rel2 = schema::Relation({types::DataType::INT64, types::DataType::FLOAT64, types::DataType::INT64}, {"table2col1", "table2col2", "table2col3"}); - tablet1 = Table::Create("test_table1", rel1); - tablet2 = Table::Create("test_table2", rel2); + tablet1 = HotColdTable::Create("test_table1", rel1); + tablet2 = HotColdTable::Create("test_table2", rel2); } std::shared_ptr
tablet1; diff --git a/src/table_store/test_utils.h b/src/table_store/test_utils.h index ae524c612e0..2f4bc35ab4b 100644 --- a/src/table_store/test_utils.h +++ b/src/table_store/test_utils.h @@ -61,7 +61,7 @@ inline StatusOr> CreateTable( const datagen::DistributionParams* dist_vars, const datagen::DistributionParams* len_vars) { schema::RowDescriptor rd(types); - auto table = Table::Create("test_table", table_store::schema::Relation(types, col_names)); + auto table = HotColdTable::Create("test_table", table_store::schema::Relation(types, col_names)); for (int batch_idx = 0; batch_idx < num_batches; batch_idx++) { auto rb = schema::RowBatch(schema::RowDescriptor(types), rb_size); diff --git a/src/ui/src/utils/pxl.ts b/src/ui/src/utils/pxl.ts index ba44b9e4ac5..cc07e2c06bd 100644 --- a/src/ui/src/utils/pxl.ts +++ b/src/ui/src/utils/pxl.ts @@ -20,6 +20,8 @@ const pxlMutations = [ 'from pxtrace', 'import pxtrace', + 'from pxlog', + 'import pxlog', 'import pxconfig', ]; diff --git a/src/vizier/funcs/context/vizier_context.h b/src/vizier/funcs/context/vizier_context.h index a431c4cdd12..6820ac738f3 100644 --- a/src/vizier/funcs/context/vizier_context.h +++ b/src/vizier/funcs/context/vizier_context.h @@ -42,17 +42,19 @@ class VizierFuncFactoryContext : public NotCopyable { public: using MDSStub = services::metadata::MetadataService::Stub; using MDTPStub = services::metadata::MetadataTracepointService::Stub; + using MDFSStub = services::metadata::MetadataFileSourceService::Stub; VizierFuncFactoryContext() = default; VizierFuncFactoryContext( const agent::BaseManager* agent_manager, const std::shared_ptr& mds_stub, - const std::shared_ptr& mdtp_stub, + const std::shared_ptr& mdtp_stub, const std::shared_ptr& mdfs_stub, const std::shared_ptr& cronscript_stub, std::shared_ptr<::px::table_store::TableStore> table_store, std::function add_grpc_auth) : agent_manager_(agent_manager), mds_stub_(mds_stub), mdtp_stub_(mdtp_stub), + mdfs_stub_(mdfs_stub), 
cronscript_stub_(cronscript_stub), table_store_(table_store), add_auth_to_grpc_context_func_(add_grpc_auth) {} @@ -72,6 +74,10 @@ class VizierFuncFactoryContext : public NotCopyable { CHECK(mdtp_stub_ != nullptr); return mdtp_stub_; } + std::shared_ptr mdfs_stub() const { + CHECK(mdfs_stub_ != nullptr); + return mdfs_stub_; + } std::shared_ptr cronscript_stub() const { CHECK(cronscript_stub_ != nullptr); return cronscript_stub_; @@ -88,6 +94,7 @@ class VizierFuncFactoryContext : public NotCopyable { const agent::BaseManager* agent_manager_ = nullptr; std::shared_ptr mds_stub_ = nullptr; std::shared_ptr mdtp_stub_ = nullptr; + std::shared_ptr mdfs_stub_ = nullptr; std::shared_ptr cronscript_stub_ = nullptr; std::shared_ptr<::px::table_store::TableStore> table_store_ = nullptr; std::function add_auth_to_grpc_context_func_; diff --git a/src/vizier/funcs/md_udtfs/md_udtfs.cc b/src/vizier/funcs/md_udtfs/md_udtfs.cc index 193c6d45dff..9ebc5c75eed 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs.cc +++ b/src/vizier/funcs/md_udtfs/md_udtfs.cc @@ -55,6 +55,8 @@ void RegisterFuncsOrDie(const VizierFuncFactoryContext& ctx, carnot::udf::Regist registry->RegisterFactoryOrDie>( "GetTracepointStatus", ctx); + registry->RegisterFactoryOrDie>( + "GetFileSourceStatus", ctx); registry ->RegisterFactoryOrDie>( "GetCronScriptHistory", ctx); diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index e48dd4ce790..55bc5492d0f 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -75,6 +75,20 @@ class UDTFWithMDTPFactory : public carnot::udf::UDTFFactory { const VizierFuncFactoryContext& ctx_; }; +template +class UDTFWithMDFSFactory : public carnot::udf::UDTFFactory { + public: + UDTFWithMDFSFactory() = delete; + explicit UDTFWithMDFSFactory(const VizierFuncFactoryContext& ctx) : ctx_(ctx) {} + + std::unique_ptr Make() override { + return std::make_unique(ctx_.mdfs_stub(), 
ctx_.add_auth_to_grpc_context_func()); + } + + private: + const VizierFuncFactoryContext& ctx_; +}; + template class UDTFWithCronscriptFactory : public carnot::udf::UDTFFactory { public: @@ -136,7 +150,9 @@ class GetTables final : public carnot::udf::UDTF { return MakeArray(ColInfo("table_name", types::DataType::STRING, types::PatternType::GENERAL, "The table name"), ColInfo("table_desc", types::DataType::STRING, types::PatternType::GENERAL, - "Description of the table")); + "Description of the table"), + ColInfo("table_metadata", types::DataType::STRING, types::PatternType::GENERAL, + "Metadata of the table in JSON")); } Status Init(FunctionContext*) { @@ -151,7 +167,7 @@ class GetTables final : public carnot::udf::UDTF { } for (const auto& [table_name, rel] : resp.schema().relation_map()) { - table_info_.emplace_back(table_name, rel.desc()); + table_info_.emplace_back(table_name, rel.desc(), rel.mutation_id()); } return Status::OK(); } @@ -163,6 +179,7 @@ class GetTables final : public carnot::udf::UDTF { const auto& r = table_info_[idx_]; rw->Append(r.table_name); rw->Append(r.table_desc); + rw->Append(r.table_metadata); idx_++; return idx_ < static_cast(table_info_.size()); @@ -170,10 +187,12 @@ class GetTables final : public carnot::udf::UDTF { private: struct TableInfo { - TableInfo(const std::string& table_name, const std::string& table_desc) - : table_name(table_name), table_desc(table_desc) {} + TableInfo(const std::string& table_name, const std::string& table_desc, + const std::string& table_metadata) + : table_name(table_name), table_desc(table_desc), table_metadata(table_metadata) {} std::string table_name; std::string table_desc; + std::string table_metadata; }; int idx_ = 0; @@ -880,6 +899,8 @@ class GetTracepointStatus final : public carnot::udf::UDTF static constexpr auto OutputRelation() { return MakeArray(ColInfo("tracepoint_id", types::DataType::UINT128, types::PatternType::GENERAL, "The id of the tracepoint"), + ColInfo("tracepoint_id_str", 
types::DataType::STRING, types::PatternType::GENERAL, + "The string id of the tracepoint"), ColInfo("name", types::DataType::STRING, types::PatternType::GENERAL, "The name of the tracepoint"), ColInfo("state", types::DataType::STRING, types::PatternType::GENERAL, @@ -958,6 +979,7 @@ class GetTracepointStatus final : public carnot::udf::UDTF tables.Accept(tables_writer); rw->Append(absl::MakeUint128(u.ab, u.cd)); + rw->Append(u.str()); rw->Append(tracepoint_info.name()); rw->Append(state); @@ -984,6 +1006,130 @@ class GetTracepointStatus final : public carnot::udf::UDTF std::function add_context_authentication_func_; }; +/** + * This UDTF fetches information about tracepoints from MDS. + */ +class GetFileSourceStatus final : public carnot::udf::UDTF { + public: + using MDFSStub = vizier::services::metadata::MetadataFileSourceService::Stub; + using FileSourceResponse = vizier::services::metadata::GetFileSourceInfoResponse; + GetFileSourceStatus() = delete; + explicit GetFileSourceStatus(std::shared_ptr stub, + std::function add_context_authentication) + : idx_(0), stub_(stub), add_context_authentication_func_(add_context_authentication) {} + + static constexpr auto Executor() { return carnot::udfspb::UDTFSourceExecutor::UDTF_ONE_KELVIN; } + + static constexpr auto OutputRelation() { + // TODO(ddelnano): Change the file_source_id column to a UINT128 once the pxl lookup from + // px/pipeline_flow_graph works. 
That script has a UINT128 stored as a string and needs to + // be joined with this column + return MakeArray(ColInfo("file_source_id", types::DataType::STRING, + types::PatternType::GENERAL, "The id of the file source"), + ColInfo("name", types::DataType::STRING, types::PatternType::GENERAL, + "The name of the file source"), + ColInfo("state", types::DataType::STRING, types::PatternType::GENERAL, + "The state of the file source"), + ColInfo("status", types::DataType::STRING, types::PatternType::GENERAL, + "The status message if not healthy"), + ColInfo("output_tables", types::DataType::STRING, types::PatternType::GENERAL, + "A list of tables output by the file source")); + // TODO(ddelnano): Add in the create time, and TTL in here after we add those attributes to the + // GetFileSourceInfo RPC call in MDS. + } + + Status Init(FunctionContext*) { + px::vizier::services::metadata::GetFileSourceInfoRequest req; + resp_ = std::make_unique(); + + grpc::ClientContext ctx; + add_context_authentication_func_(&ctx); + auto s = stub_->GetFileSourceInfo(&ctx, req, resp_.get()); + if (!s.ok()) { + return error::Internal("Failed to make RPC call to GetFileSourceStatus: $0", + s.error_message()); + } + return Status::OK(); + } + + bool NextRecord(FunctionContext*, RecordWriter* rw) { + if (resp_->file_sources_size() == 0) { + return false; + } + const auto& file_source_info = resp_->file_sources(idx_); + + auto u_or_s = ParseUUID(file_source_info.id()); + sole::uuid u; + if (u_or_s.ok()) { + u = u_or_s.ConsumeValueOrDie(); + } + + auto actual = file_source_info.state(); + auto expected = file_source_info.expected_state(); + std::string state; + + switch (actual) { + case statuspb::PENDING_STATE: { + state = "pending"; + break; + } + case statuspb::RUNNING_STATE: { + state = "running"; + break; + } + case statuspb::FAILED_STATE: { + state = "failed"; + break; + } + case statuspb::TERMINATED_STATE: { + if (actual != expected) { + state = "terminating"; + } else { + state = 
"terminated"; + } + break; + } + default: + state = "unknown"; + } + + rapidjson::Document tables; + tables.SetArray(); + for (const auto& table : file_source_info.schema_names()) { + tables.PushBack(internal::StringRef(table), tables.GetAllocator()); + } + + rapidjson::StringBuffer tables_sb; + rapidjson::Writer tables_writer(tables_sb); + tables.Accept(tables_writer); + + rw->Append(u.str()); + rw->Append(file_source_info.name()); + rw->Append(state); + + rapidjson::Document statuses; + statuses.SetArray(); + for (const auto& status : file_source_info.statuses()) { + statuses.PushBack(internal::StringRef(status.msg()), statuses.GetAllocator()); + } + rapidjson::StringBuffer statuses_sb; + rapidjson::Writer statuses_writer(statuses_sb); + statuses.Accept(statuses_writer); + rw->Append(statuses_sb.GetString()); + + rw->Append(tables_sb.GetString()); + + ++idx_; + return idx_ < resp_->file_sources_size(); + } + + private: + int idx_ = 0; + std::unique_ptr resp_; + std::shared_ptr stub_; + std::function add_context_authentication_func_; +}; + class GetCronScriptHistory final : public carnot::udf::UDTF { public: using CronScriptStoreStub = vizier::services::metadata::CronScriptStoreService::Stub; diff --git a/src/vizier/messages/messagespb/BUILD.bazel b/src/vizier/messages/messagespb/BUILD.bazel index 5be2739d0dc..902b666b693 100644 --- a/src/vizier/messages/messagespb/BUILD.bazel +++ b/src/vizier/messages/messagespb/BUILD.bazel @@ -24,6 +24,7 @@ pl_proto_library( "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", + "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/carnot/planpb:plan_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/bloomfilterpb:bloomfilter_pl_proto", @@ -44,6 +45,7 @@ pl_cc_proto_library( "//src/api/proto/uuidpb:uuid_pl_cc_proto", 
"//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", + "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/carnot/planpb:plan_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/bloomfilterpb:bloomfilter_pl_cc_proto", @@ -65,6 +67,7 @@ pl_go_proto_library( "//src/api/proto/uuidpb:uuid_pl_go_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/carnot/planpb:plan_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/bloomfilterpb:bloomfilter_pl_go_proto", diff --git a/src/vizier/messages/messagespb/messages.pb.go b/src/vizier/messages/messagespb/messages.pb.go index 02713a1d90b..f2b36a62af1 100755 --- a/src/vizier/messages/messagespb/messages.pb.go +++ b/src/vizier/messages/messagespb/messages.pb.go @@ -13,6 +13,7 @@ import ( uuidpb "px.dev/pixie/src/api/proto/uuidpb" distributedpb "px.dev/pixie/src/carnot/planner/distributedpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" + ir "px.dev/pixie/src/carnot/planner/file_source/ir" planpb "px.dev/pixie/src/carnot/planpb" statuspb "px.dev/pixie/src/common/base/statuspb" metadatapb "px.dev/pixie/src/shared/k8s/metadatapb" @@ -44,6 +45,7 @@ type VizierMessage struct { // *VizierMessage_TracepointMessage // *VizierMessage_ConfigUpdateMessage // *VizierMessage_K8SMetadataMessage + // *VizierMessage_FileSourceMessage Msg isVizierMessage_Msg `protobuf_oneof:"msg"` } @@ -113,6 +115,9 @@ type VizierMessage_ConfigUpdateMessage struct { type VizierMessage_K8SMetadataMessage struct { K8SMetadataMessage *K8SMetadataMessage `protobuf:"bytes,12,opt,name=k8s_metadata_message,json=k8sMetadataMessage,proto3,oneof" json:"k8s_metadata_message,omitempty"` } +type VizierMessage_FileSourceMessage struct { + 
FileSourceMessage *FileSourceMessage `protobuf:"bytes,13,opt,name=file_source_message,json=fileSourceMessage,proto3,oneof" json:"file_source_message,omitempty"` +} func (*VizierMessage_RegisterAgentRequest) isVizierMessage_Msg() {} func (*VizierMessage_RegisterAgentResponse) isVizierMessage_Msg() {} @@ -123,6 +128,7 @@ func (*VizierMessage_ExecuteQueryRequest) isVizierMessage_Msg() {} func (*VizierMessage_TracepointMessage) isVizierMessage_Msg() {} func (*VizierMessage_ConfigUpdateMessage) isVizierMessage_Msg() {} func (*VizierMessage_K8SMetadataMessage) isVizierMessage_Msg() {} +func (*VizierMessage_FileSourceMessage) isVizierMessage_Msg() {} func (m *VizierMessage) GetMsg() isVizierMessage_Msg { if m != nil { @@ -194,6 +200,13 @@ func (m *VizierMessage) GetK8SMetadataMessage() *K8SMetadataMessage { return nil } +func (m *VizierMessage) GetFileSourceMessage() *FileSourceMessage { + if x, ok := m.GetMsg().(*VizierMessage_FileSourceMessage); ok { + return x.FileSourceMessage + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*VizierMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -206,6 +219,7 @@ func (*VizierMessage) XXX_OneofWrappers() []interface{} { (*VizierMessage_TracepointMessage)(nil), (*VizierMessage_ConfigUpdateMessage)(nil), (*VizierMessage_K8SMetadataMessage)(nil), + (*VizierMessage_FileSourceMessage)(nil), } } @@ -307,6 +321,104 @@ func (*TracepointMessage) XXX_OneofWrappers() []interface{} { } } +type FileSourceMessage struct { + // Types that are valid to be assigned to Msg: + // *FileSourceMessage_FileSourceInfoUpdate + // *FileSourceMessage_RemoveFileSourceRequest + // *FileSourceMessage_RegisterFileSourceRequest + Msg isFileSourceMessage_Msg `protobuf_oneof:"msg"` +} + +func (m *FileSourceMessage) Reset() { *m = FileSourceMessage{} } +func (*FileSourceMessage) ProtoMessage() {} +func (*FileSourceMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_0046fd1b9991f89c, []int{2} +} +func (m *FileSourceMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileSourceMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FileSourceMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FileSourceMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSourceMessage.Merge(m, src) +} +func (m *FileSourceMessage) XXX_Size() int { + return m.Size() +} +func (m *FileSourceMessage) XXX_DiscardUnknown() { + xxx_messageInfo_FileSourceMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSourceMessage proto.InternalMessageInfo + +type isFileSourceMessage_Msg interface { + isFileSourceMessage_Msg() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type FileSourceMessage_FileSourceInfoUpdate struct { + FileSourceInfoUpdate *FileSourceInfoUpdate 
`protobuf:"bytes,1,opt,name=file_source_info_update,json=fileSourceInfoUpdate,proto3,oneof" json:"file_source_info_update,omitempty"` +} +type FileSourceMessage_RemoveFileSourceRequest struct { + RemoveFileSourceRequest *RemoveFileSourceRequest `protobuf:"bytes,2,opt,name=remove_file_source_request,json=removeFileSourceRequest,proto3,oneof" json:"remove_file_source_request,omitempty"` +} +type FileSourceMessage_RegisterFileSourceRequest struct { + RegisterFileSourceRequest *RegisterFileSourceRequest `protobuf:"bytes,3,opt,name=register_file_source_request,json=registerFileSourceRequest,proto3,oneof" json:"register_file_source_request,omitempty"` +} + +func (*FileSourceMessage_FileSourceInfoUpdate) isFileSourceMessage_Msg() {} +func (*FileSourceMessage_RemoveFileSourceRequest) isFileSourceMessage_Msg() {} +func (*FileSourceMessage_RegisterFileSourceRequest) isFileSourceMessage_Msg() {} + +func (m *FileSourceMessage) GetMsg() isFileSourceMessage_Msg { + if m != nil { + return m.Msg + } + return nil +} + +func (m *FileSourceMessage) GetFileSourceInfoUpdate() *FileSourceInfoUpdate { + if x, ok := m.GetMsg().(*FileSourceMessage_FileSourceInfoUpdate); ok { + return x.FileSourceInfoUpdate + } + return nil +} + +func (m *FileSourceMessage) GetRemoveFileSourceRequest() *RemoveFileSourceRequest { + if x, ok := m.GetMsg().(*FileSourceMessage_RemoveFileSourceRequest); ok { + return x.RemoveFileSourceRequest + } + return nil +} + +func (m *FileSourceMessage) GetRegisterFileSourceRequest() *RegisterFileSourceRequest { + if x, ok := m.GetMsg().(*FileSourceMessage_RegisterFileSourceRequest); ok { + return x.RegisterFileSourceRequest + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*FileSourceMessage) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*FileSourceMessage_FileSourceInfoUpdate)(nil), + (*FileSourceMessage_RemoveFileSourceRequest)(nil), + (*FileSourceMessage_RegisterFileSourceRequest)(nil), + } +} + type ConfigUpdateMessage struct { // Types that are valid to be assigned to Msg: // *ConfigUpdateMessage_ConfigUpdateRequest @@ -316,7 +428,7 @@ type ConfigUpdateMessage struct { func (m *ConfigUpdateMessage) Reset() { *m = ConfigUpdateMessage{} } func (*ConfigUpdateMessage) ProtoMessage() {} func (*ConfigUpdateMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{2} + return fileDescriptor_0046fd1b9991f89c, []int{3} } func (m *ConfigUpdateMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -390,7 +502,7 @@ type K8SMetadataMessage struct { func (m *K8SMetadataMessage) Reset() { *m = K8SMetadataMessage{} } func (*K8SMetadataMessage) ProtoMessage() {} func (*K8SMetadataMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{3} + return fileDescriptor_0046fd1b9991f89c, []int{4} } func (m *K8SMetadataMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -485,7 +597,7 @@ type RegisterAgentRequest struct { func (m *RegisterAgentRequest) Reset() { *m = RegisterAgentRequest{} } func (*RegisterAgentRequest) ProtoMessage() {} func (*RegisterAgentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{4} + return fileDescriptor_0046fd1b9991f89c, []int{5} } func (m *RegisterAgentRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -535,7 +647,7 @@ type RegisterAgentResponse struct { func (m *RegisterAgentResponse) Reset() { *m = RegisterAgentResponse{} } func (*RegisterAgentResponse) ProtoMessage() {} func (*RegisterAgentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{5} + return fileDescriptor_0046fd1b9991f89c, []int{6} } func (m 
*RegisterAgentResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +690,7 @@ type AgentDataInfo struct { func (m *AgentDataInfo) Reset() { *m = AgentDataInfo{} } func (*AgentDataInfo) ProtoMessage() {} func (*AgentDataInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{6} + return fileDescriptor_0046fd1b9991f89c, []int{7} } func (m *AgentDataInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -625,7 +737,7 @@ type AgentUpdateInfo struct { func (m *AgentUpdateInfo) Reset() { *m = AgentUpdateInfo{} } func (*AgentUpdateInfo) ProtoMessage() {} func (*AgentUpdateInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{7} + return fileDescriptor_0046fd1b9991f89c, []int{8} } func (m *AgentUpdateInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -699,7 +811,7 @@ type Heartbeat struct { func (m *Heartbeat) Reset() { *m = Heartbeat{} } func (*Heartbeat) ProtoMessage() {} func (*Heartbeat) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{8} + return fileDescriptor_0046fd1b9991f89c, []int{9} } func (m *Heartbeat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -764,7 +876,7 @@ type MetadataUpdateInfo struct { func (m *MetadataUpdateInfo) Reset() { *m = MetadataUpdateInfo{} } func (*MetadataUpdateInfo) ProtoMessage() {} func (*MetadataUpdateInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{9} + return fileDescriptor_0046fd1b9991f89c, []int{10} } func (m *MetadataUpdateInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -816,7 +928,7 @@ type HeartbeatAck struct { func (m *HeartbeatAck) Reset() { *m = HeartbeatAck{} } func (*HeartbeatAck) ProtoMessage() {} func (*HeartbeatAck) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{10} + return fileDescriptor_0046fd1b9991f89c, []int{11} } func (m *HeartbeatAck) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-873,7 +985,7 @@ type HeartbeatNack struct { func (m *HeartbeatNack) Reset() { *m = HeartbeatNack{} } func (*HeartbeatNack) ProtoMessage() {} func (*HeartbeatNack) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{11} + return fileDescriptor_0046fd1b9991f89c, []int{12} } func (m *HeartbeatNack) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -918,7 +1030,7 @@ type ExecuteQueryRequest struct { func (m *ExecuteQueryRequest) Reset() { *m = ExecuteQueryRequest{} } func (*ExecuteQueryRequest) ProtoMessage() {} func (*ExecuteQueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{12} + return fileDescriptor_0046fd1b9991f89c, []int{13} } func (m *ExecuteQueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1088,7 @@ type RegisterTracepointRequest struct { func (m *RegisterTracepointRequest) Reset() { *m = RegisterTracepointRequest{} } func (*RegisterTracepointRequest) ProtoMessage() {} func (*RegisterTracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{13} + return fileDescriptor_0046fd1b9991f89c, []int{14} } func (m *RegisterTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1029,7 +1141,7 @@ type TracepointInfoUpdate struct { func (m *TracepointInfoUpdate) Reset() { *m = TracepointInfoUpdate{} } func (*TracepointInfoUpdate) ProtoMessage() {} func (*TracepointInfoUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{14} + return fileDescriptor_0046fd1b9991f89c, []int{15} } func (m *TracepointInfoUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1093,7 +1205,7 @@ type RemoveTracepointRequest struct { func (m *RemoveTracepointRequest) Reset() { *m = RemoveTracepointRequest{} } func (*RemoveTracepointRequest) ProtoMessage() {} func (*RemoveTracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{15} + return 
fileDescriptor_0046fd1b9991f89c, []int{16} } func (m *RemoveTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1129,6 +1241,167 @@ func (m *RemoveTracepointRequest) GetID() *uuidpb.UUID { return nil } +type RegisterFileSourceRequest struct { + FileSourceDeployment *ir.FileSourceDeployment `protobuf:"bytes,1,opt,name=file_source_deployment,json=fileSourceDeployment,proto3" json:"file_source_deployment,omitempty"` + ID *uuidpb.UUID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *RegisterFileSourceRequest) Reset() { *m = RegisterFileSourceRequest{} } +func (*RegisterFileSourceRequest) ProtoMessage() {} +func (*RegisterFileSourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0046fd1b9991f89c, []int{17} +} +func (m *RegisterFileSourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegisterFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegisterFileSourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RegisterFileSourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterFileSourceRequest.Merge(m, src) +} +func (m *RegisterFileSourceRequest) XXX_Size() int { + return m.Size() +} +func (m *RegisterFileSourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterFileSourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterFileSourceRequest proto.InternalMessageInfo + +func (m *RegisterFileSourceRequest) GetFileSourceDeployment() *ir.FileSourceDeployment { + if m != nil { + return m.FileSourceDeployment + } + return nil +} + +func (m *RegisterFileSourceRequest) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil +} + +type FileSourceInfoUpdate struct { + ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` 
+ State statuspb.LifeCycleState `protobuf:"varint,2,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` + Status *statuspb.Status `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + AgentID *uuidpb.UUID `protobuf:"bytes,4,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` +} + +func (m *FileSourceInfoUpdate) Reset() { *m = FileSourceInfoUpdate{} } +func (*FileSourceInfoUpdate) ProtoMessage() {} +func (*FileSourceInfoUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_0046fd1b9991f89c, []int{18} +} +func (m *FileSourceInfoUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileSourceInfoUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FileSourceInfoUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FileSourceInfoUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSourceInfoUpdate.Merge(m, src) +} +func (m *FileSourceInfoUpdate) XXX_Size() int { + return m.Size() +} +func (m *FileSourceInfoUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_FileSourceInfoUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSourceInfoUpdate proto.InternalMessageInfo + +func (m *FileSourceInfoUpdate) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil +} + +func (m *FileSourceInfoUpdate) GetState() statuspb.LifeCycleState { + if m != nil { + return m.State + } + return statuspb.UNKNOWN_STATE +} + +func (m *FileSourceInfoUpdate) GetStatus() *statuspb.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *FileSourceInfoUpdate) GetAgentID() *uuidpb.UUID { + if m != nil { + return m.AgentID + } + return nil +} + +type RemoveFileSourceRequest struct { + ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m 
*RemoveFileSourceRequest) Reset() { *m = RemoveFileSourceRequest{} } +func (*RemoveFileSourceRequest) ProtoMessage() {} +func (*RemoveFileSourceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0046fd1b9991f89c, []int{19} +} +func (m *RemoveFileSourceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveFileSourceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveFileSourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveFileSourceRequest.Merge(m, src) +} +func (m *RemoveFileSourceRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveFileSourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveFileSourceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveFileSourceRequest proto.InternalMessageInfo + +func (m *RemoveFileSourceRequest) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil +} + type ConfigUpdateRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -1137,7 +1410,7 @@ type ConfigUpdateRequest struct { func (m *ConfigUpdateRequest) Reset() { *m = ConfigUpdateRequest{} } func (*ConfigUpdateRequest) ProtoMessage() {} func (*ConfigUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{16} + return fileDescriptor_0046fd1b9991f89c, []int{20} } func (m *ConfigUpdateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1188,7 +1461,7 @@ type MetricsMessage struct { func (m *MetricsMessage) Reset() { *m = MetricsMessage{} } func (*MetricsMessage) ProtoMessage() {} func (*MetricsMessage) Descriptor() ([]byte, []int) { - return 
fileDescriptor_0046fd1b9991f89c, []int{17} + return fileDescriptor_0046fd1b9991f89c, []int{21} } func (m *MetricsMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1234,6 +1507,7 @@ func (m *MetricsMessage) GetPodName() string { func init() { proto.RegisterType((*VizierMessage)(nil), "px.vizier.messages.VizierMessage") proto.RegisterType((*TracepointMessage)(nil), "px.vizier.messages.TracepointMessage") + proto.RegisterType((*FileSourceMessage)(nil), "px.vizier.messages.FileSourceMessage") proto.RegisterType((*ConfigUpdateMessage)(nil), "px.vizier.messages.ConfigUpdateMessage") proto.RegisterType((*K8SMetadataMessage)(nil), "px.vizier.messages.K8sMetadataMessage") proto.RegisterType((*RegisterAgentRequest)(nil), "px.vizier.messages.RegisterAgentRequest") @@ -1248,6 +1522,9 @@ func init() { proto.RegisterType((*RegisterTracepointRequest)(nil), "px.vizier.messages.RegisterTracepointRequest") proto.RegisterType((*TracepointInfoUpdate)(nil), "px.vizier.messages.TracepointInfoUpdate") proto.RegisterType((*RemoveTracepointRequest)(nil), "px.vizier.messages.RemoveTracepointRequest") + proto.RegisterType((*RegisterFileSourceRequest)(nil), "px.vizier.messages.RegisterFileSourceRequest") + proto.RegisterType((*FileSourceInfoUpdate)(nil), "px.vizier.messages.FileSourceInfoUpdate") + proto.RegisterType((*RemoveFileSourceRequest)(nil), "px.vizier.messages.RemoveFileSourceRequest") proto.RegisterType((*ConfigUpdateRequest)(nil), "px.vizier.messages.ConfigUpdateRequest") proto.RegisterType((*MetricsMessage)(nil), "px.vizier.messages.MetricsMessage") } @@ -1257,104 +1534,112 @@ func init() { } var fileDescriptor_0046fd1b9991f89c = []byte{ - // 1544 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x4b, 0x6f, 0x1b, 0x47, - 0x12, 0xe6, 0x90, 0x94, 0x44, 0x95, 0xde, 0x2d, 0xc9, 0xa6, 0xfc, 0xa0, 0xb4, 0x5c, 0x78, 0x2d, - 0xdb, 0xeb, 0xe1, 0xae, 0xbc, 0x0b, 0x0b, 0x58, 0xd8, 0x58, 0x53, 0x5c, 0x58, 0xd2, 
0x46, 0x86, - 0x3d, 0x92, 0x1d, 0x40, 0x40, 0x30, 0x69, 0xce, 0xb4, 0xa8, 0x81, 0x38, 0x0f, 0x77, 0x0f, 0x15, - 0xd1, 0xb9, 0xe4, 0x27, 0xe4, 0x90, 0x53, 0x7e, 0x41, 0x6e, 0xf9, 0x05, 0x39, 0x27, 0x47, 0x1f, - 0x75, 0x12, 0x62, 0xfa, 0x12, 0x24, 0x17, 0xff, 0x84, 0xa0, 0x1f, 0xf3, 0xa0, 0x38, 0x94, 0xec, - 0x93, 0x7a, 0xaa, 0xbf, 0xfa, 0xaa, 0xbb, 0x1e, 0x5d, 0x45, 0xc1, 0x7d, 0x46, 0xad, 0xda, 0xb1, - 0xf3, 0xc6, 0x21, 0xb4, 0xe6, 0x12, 0xc6, 0x70, 0x8b, 0xb0, 0x78, 0x11, 0x34, 0xe3, 0xa5, 0x1e, - 0x50, 0x3f, 0xf4, 0x11, 0x0a, 0x4e, 0x74, 0x89, 0xd6, 0xa3, 0x9d, 0x6b, 0x0b, 0x2d, 0xbf, 0xe5, - 0x8b, 0xed, 0x1a, 0x5f, 0x49, 0xe4, 0xb5, 0x65, 0x4e, 0x8c, 0x03, 0xa7, 0x26, 0x77, 0x3a, 0x1d, - 0xc7, 0x0e, 0x9a, 0xe2, 0x8f, 0x02, 0x3c, 0xe4, 0x00, 0x0b, 0x53, 0xcf, 0x0f, 0x6b, 0x41, 0x1b, - 0x7b, 0x1e, 0xa1, 0x35, 0xdb, 0x61, 0x21, 0x75, 0x9a, 0x9d, 0x90, 0x70, 0x70, 0xea, 0xcb, 0xe4, - 0x08, 0xa5, 0xf8, 0x28, 0x4b, 0xb1, 0xeb, 0x61, 0xd7, 0xb1, 0xcc, 0x90, 0x62, 0xcb, 0xf1, 0x5a, - 0x35, 0x87, 0xd6, 0xda, 0x7e, 0xcb, 0xb1, 0x70, 0x3b, 0x68, 0x46, 0x2b, 0xa5, 0x7e, 0xe3, 0x9c, - 0x7a, 0xd0, 0xac, 0xa5, 0xc8, 0x6f, 0x89, 0x5d, 0xdf, 0x75, 0x7d, 0xaf, 0xd6, 0xc4, 0x8c, 0xd4, - 0x58, 0x88, 0xc3, 0x0e, 0xf7, 0x84, 0x5c, 0x28, 0xd8, 0x2a, 0x87, 0xb1, 0x43, 0x4c, 0x89, 0x5d, - 0x3b, 0x5a, 0xe7, 0x1e, 0x0b, 0xb1, 0x8d, 0x43, 0x2c, 0x3c, 0x26, 0x97, 0x0a, 0xf9, 0x8f, 0x94, - 0x83, 0x19, 0xa1, 0xc7, 0x8e, 0x45, 0x12, 0x78, 0x8d, 0x85, 0x3e, 0x25, 0x82, 0xdc, 0xa7, 0x44, - 0x69, 0xe8, 0x59, 0x1a, 0xca, 0x16, 0x6e, 0x11, 0x2f, 0x0c, 0x9a, 0xf2, 0xaf, 0xc4, 0x57, 0x7f, - 0x1c, 0x85, 0xa9, 0x57, 0x02, 0xbe, 0x23, 0x43, 0x82, 0xbe, 0x84, 0x2b, 0x94, 0xb4, 0x1c, 0x16, - 0x12, 0x6a, 0x0a, 0xa4, 0x49, 0xc9, 0xeb, 0x0e, 0x61, 0x61, 0x59, 0x5b, 0xd1, 0x56, 0x27, 0xd6, - 0x56, 0xf5, 0xc1, 0x30, 0xea, 0x86, 0xd2, 0x78, 0xc2, 0x15, 0x0c, 0x89, 0xdf, 0xcc, 0x19, 0x0b, - 0x34, 0x43, 0x8e, 0x2c, 0xb8, 0x3a, 0x60, 0x81, 0x05, 0xbe, 0xc7, 0x48, 0x39, 0x2f, 0x4c, 0xdc, - 0xf9, 0x08, 0x13, 0x52, 
0x61, 0x33, 0x67, 0x2c, 0xd2, 0xac, 0x0d, 0xf4, 0x08, 0xc6, 0x0f, 0x09, - 0xa6, 0x61, 0x93, 0xe0, 0xb0, 0x3c, 0x22, 0x68, 0x6f, 0x66, 0xd1, 0x6e, 0x46, 0xa0, 0xcd, 0x9c, - 0x91, 0x68, 0xa0, 0xa7, 0x30, 0x15, 0x7f, 0x98, 0xd8, 0x3a, 0x2a, 0x8f, 0x0a, 0x8a, 0x95, 0x0b, - 0x29, 0x9e, 0x58, 0x47, 0x9b, 0x39, 0x63, 0xf2, 0x30, 0xf5, 0x8d, 0xb6, 0x61, 0x3a, 0x21, 0xf2, - 0x38, 0xd3, 0x98, 0x60, 0xfa, 0xcb, 0x85, 0x4c, 0xcf, 0xb0, 0xa0, 0x4a, 0xce, 0xc0, 0x05, 0xe8, - 0x0b, 0x58, 0x24, 0x27, 0xc4, 0xea, 0x84, 0xc4, 0x7c, 0xdd, 0x21, 0xb4, 0x1b, 0x47, 0xa6, 0x24, - 0x28, 0x6f, 0x67, 0x51, 0xfe, 0x4f, 0x2a, 0xbc, 0xe0, 0xf8, 0x24, 0x30, 0xf3, 0x64, 0x50, 0x8c, - 0x5e, 0x01, 0xe2, 0x25, 0x40, 0x02, 0xdf, 0xf1, 0x42, 0x53, 0x31, 0x94, 0x41, 0x70, 0xdf, 0xca, - 0xe2, 0xde, 0x8b, 0xd1, 0x2a, 0x79, 0x36, 0x73, 0xc6, 0x5c, 0x78, 0x5e, 0xc8, 0x8f, 0x6d, 0xf9, - 0xde, 0x81, 0xd3, 0x32, 0x3b, 0x81, 0x8d, 0x43, 0x12, 0x53, 0x4f, 0x0c, 0x3f, 0xf6, 0x86, 0x50, - 0x78, 0x29, 0xf0, 0x09, 0xf9, 0xbc, 0x35, 0x28, 0x46, 0xfb, 0xb0, 0x70, 0xb4, 0xce, 0xcc, 0xa8, - 0x2c, 0x62, 0xf6, 0x49, 0xc1, 0xfe, 0xb7, 0x2c, 0xf6, 0xff, 0xaf, 0xb3, 0x1d, 0x05, 0x4f, 0xc8, - 0xd1, 0xd1, 0x80, 0xb4, 0x3e, 0x02, 0x05, 0x97, 0xb5, 0xb6, 0x8b, 0xa5, 0xc2, 0x6c, 0x71, 0xbb, - 0x58, 0x2a, 0xce, 0x8e, 0x54, 0x4f, 0xf3, 0x30, 0x37, 0x70, 0x71, 0x5e, 0x35, 0x29, 0xdf, 0x39, - 0xde, 0x81, 0xaf, 0x2e, 0x7b, 0x51, 0xd5, 0x24, 0x34, 0x5b, 0xde, 0x81, 0x2f, 0x6f, 0xc5, 0xab, - 0x26, 0xcc, 0x90, 0x23, 0x07, 0x96, 0x28, 0x71, 0xfd, 0x63, 0x62, 0xa6, 0x0c, 0x45, 0x09, 0x20, - 0xeb, 0xe6, 0x5e, 0x76, 0xdd, 0x70, 0xa5, 0xc4, 0x54, 0x92, 0x04, 0x57, 0x69, 0xf6, 0x16, 0xf2, - 0xe1, 0x7a, 0x5c, 0xa0, 0x19, 0xc6, 0x0a, 0xc2, 0xd8, 0xfd, 0x8b, 0x8a, 0x34, 0xcb, 0xdc, 0x12, - 0x1d, 0xb6, 0xa9, 0xdc, 0x5c, 0xfd, 0x1a, 0xe6, 0x33, 0xe2, 0x3e, 0x98, 0x3f, 0xfd, 0x0f, 0xd2, - 0xa5, 0xf9, 0x93, 0x4a, 0x7b, 0x6b, 0x50, 0x1c, 0x19, 0xff, 0x3d, 0x0f, 0x68, 0x30, 0x2f, 0xd0, - 0x3e, 0xcc, 0xf7, 0x65, 0xd7, 0x60, 0x54, 0xe5, 0xeb, 0xaa, 
0x1f, 0xad, 0x33, 0x3d, 0x79, 0xc9, - 0x75, 0x83, 0x30, 0xbf, 0x43, 0x2d, 0x12, 0x47, 0x75, 0x2e, 0x95, 0x5e, 0x2a, 0xa4, 0xc7, 0x70, - 0xc3, 0x75, 0x18, 0x73, 0xbc, 0x96, 0xd9, 0x67, 0xa3, 0x3f, 0xaa, 0x0f, 0x86, 0x1b, 0xd9, 0x91, - 0xda, 0xa9, 0x63, 0xa7, 0xdc, 0xed, 0x0e, 0xdb, 0x44, 0x5d, 0xb8, 0x39, 0xc4, 0xae, 0x7a, 0x86, - 0x65, 0x84, 0xff, 0xf5, 0x69, 0x86, 0xe3, 0x17, 0xf9, 0x9a, 0x3b, 0x74, 0x37, 0x72, 0xf6, 0x1b, - 0x58, 0xc8, 0x6a, 0x19, 0xe8, 0x31, 0x14, 0x79, 0xed, 0x28, 0xf7, 0xde, 0x4d, 0x45, 0x36, 0x6a, - 0x66, 0xd1, 0x81, 0x64, 0x13, 0x13, 0xca, 0xbc, 0x48, 0x0c, 0xa1, 0x87, 0x6e, 0x40, 0x11, 0x33, - 0xc7, 0x16, 0x17, 0x98, 0xaa, 0x97, 0x7a, 0x67, 0xcb, 0xc5, 0x27, 0xbb, 0x5b, 0x0d, 0x43, 0x48, - 0xb7, 0x8b, 0xa5, 0xfc, 0x6c, 0xa1, 0xfa, 0x1f, 0x58, 0xcc, 0xec, 0x25, 0xb1, 0xb2, 0x76, 0x81, - 0xb2, 0x05, 0x53, 0x42, 0xa9, 0x81, 0x43, 0xcc, 0xed, 0x22, 0x03, 0xa6, 0x62, 0xff, 0xa5, 0x8e, - 0x2e, 0xaa, 0x43, 0x0e, 0x0a, 0xba, 0x9a, 0x33, 0xf4, 0xbe, 0x01, 0x45, 0x8f, 0x5c, 0x23, 0x4e, - 0x3f, 0xe9, 0xa6, 0xbe, 0xaa, 0x7f, 0xe4, 0x61, 0x46, 0x58, 0x91, 0x79, 0x22, 0xec, 0x3c, 0x86, - 0x51, 0x66, 0x1d, 0x12, 0x17, 0x97, 0xf3, 0x2b, 0x85, 0x73, 0xef, 0x5a, 0xec, 0x9b, 0x78, 0x7c, - 0xd8, 0xc3, 0xcd, 0xb6, 0xd0, 0x33, 0x94, 0x16, 0x7a, 0x01, 0x33, 0x01, 0xf5, 0x2d, 0xc2, 0x98, - 0x69, 0x51, 0x82, 0x43, 0x62, 0x97, 0x8b, 0x82, 0xe8, 0x82, 0x1c, 0x7e, 0x2e, 0x15, 0x36, 0x24, - 0xde, 0x98, 0x0e, 0xfa, 0xbe, 0xd1, 0x3e, 0xa0, 0x88, 0x32, 0x24, 0xd4, 0x75, 0x3c, 0xc1, 0x3a, - 0x22, 0x58, 0xef, 0x5d, 0xca, 0xba, 0x17, 0xab, 0x18, 0x73, 0xc1, 0x79, 0x11, 0xfa, 0x3b, 0x20, - 0xdb, 0x27, 0x2c, 0xaa, 0x78, 0x75, 0x75, 0xde, 0x84, 0x4b, 0xc6, 0x2c, 0xdf, 0x91, 0xae, 0xd9, - 0x95, 0x97, 0xfb, 0x37, 0x14, 0x39, 0xf9, 0x45, 0xad, 0xb5, 0x2f, 0x6a, 0x86, 0x80, 0xcb, 0x67, - 0xbd, 0xfa, 0xb3, 0x06, 0xe3, 0x71, 0xe3, 0x45, 0x0f, 0xa1, 0x24, 0x67, 0x12, 0x95, 0x08, 0x13, - 0x6b, 0x33, 0x9c, 0x4e, 0x8e, 0xa0, 0xfa, 0xcb, 0x97, 0x5b, 0x8d, 0xfa, 0x44, 0xef, 0x6c, 0x79, - 
0x4c, 0x66, 0x5e, 0xc3, 0x18, 0x13, 0xe8, 0x2d, 0x1b, 0x21, 0x28, 0x86, 0x8e, 0x2b, 0x47, 0x98, - 0x82, 0x21, 0xd6, 0xa8, 0x01, 0x13, 0xea, 0x02, 0x22, 0x35, 0x64, 0x59, 0xfd, 0x75, 0xe8, 0xf1, - 0x92, 0x70, 0x1b, 0xd0, 0x49, 0x42, 0x7f, 0x1b, 0x66, 0x18, 0xaf, 0x0f, 0xcf, 0x22, 0xa6, 0xd7, - 0x71, 0x9b, 0x84, 0x96, 0x8b, 0xc2, 0xc8, 0x74, 0x24, 0x7e, 0x26, 0xa4, 0xd5, 0x2e, 0xa0, 0xfe, - 0x17, 0x46, 0xa8, 0xaf, 0xc1, 0xa4, 0x4a, 0x10, 0xd3, 0x72, 0x6c, 0x2a, 0x0e, 0x38, 0x5e, 0x9f, - 0xe9, 0x9d, 0x2d, 0x4f, 0xec, 0x4a, 0xf9, 0xc6, 0x56, 0xc3, 0x30, 0x26, 0x14, 0x68, 0xc3, 0xb1, - 0x29, 0xba, 0x03, 0xe3, 0x81, 0x6f, 0x0b, 0x3c, 0x2b, 0x17, 0x56, 0x0a, 0xab, 0xe3, 0xf5, 0xc9, - 0xde, 0xd9, 0x72, 0xe9, 0xb9, 0x6f, 0x73, 0x30, 0x33, 0x4a, 0x81, 0x6f, 0x73, 0x24, 0xdb, 0x2e, - 0x96, 0xb4, 0xd9, 0x7c, 0xf5, 0x3b, 0x0d, 0x26, 0xd3, 0x73, 0x50, 0xec, 0x0e, 0x2d, 0xe5, 0x8e, - 0x8c, 0x8b, 0xe4, 0xb3, 0x2e, 0x82, 0x9e, 0x66, 0xf9, 0x2d, 0xb3, 0x93, 0x0f, 0xde, 0x37, 0xed, - 0xba, 0x6a, 0x0d, 0xa6, 0xfa, 0x66, 0x2a, 0x54, 0x01, 0xa0, 0x24, 0x6a, 0x44, 0xe2, 0x70, 0x25, - 0x23, 0x25, 0xa9, 0x7e, 0xaf, 0xc1, 0x7c, 0xc6, 0xc8, 0xc4, 0xd3, 0x42, 0x8e, 0x5c, 0x97, 0xa4, - 0x85, 0x50, 0xe2, 0x69, 0x21, 0xd0, 0x5b, 0x36, 0xba, 0x0b, 0x45, 0x5e, 0xff, 0xea, 0x0e, 0x57, - 0xce, 0x3d, 0x0b, 0xbc, 0x1c, 0xda, 0xd8, 0x33, 0x04, 0x06, 0x95, 0x61, 0x0c, 0x7b, 0xb8, 0xdd, - 0x7d, 0x43, 0x44, 0x80, 0x4b, 0x46, 0xf4, 0xa9, 0x1e, 0x9f, 0x9f, 0x34, 0x58, 0x1a, 0xda, 0x61, - 0xd1, 0x57, 0xb0, 0x98, 0x6a, 0xd6, 0x36, 0x09, 0xda, 0x7e, 0xd7, 0x25, 0x5e, 0xd4, 0x26, 0xeb, - 0x59, 0x2f, 0x52, 0xff, 0x2f, 0x1f, 0xdd, 0xa1, 0x7a, 0xf4, 0x7b, 0x27, 0xe1, 0x6f, 0xc4, 0x4c, - 0xe9, 0xc9, 0x24, 0x91, 0xa2, 0xdb, 0x90, 0x77, 0x6c, 0xd5, 0xac, 0x06, 0xbc, 0x32, 0xda, 0x3b, - 0x5b, 0xce, 0x6f, 0x35, 0x8c, 0xbc, 0x63, 0x57, 0x4f, 0x35, 0x58, 0xc8, 0x9a, 0x79, 0x14, 0x83, - 0x76, 0x29, 0x03, 0xfa, 0x27, 0x8c, 0xf0, 0x9f, 0x52, 0xb2, 0xca, 0xa6, 0xd7, 0xae, 0x8b, 0x57, - 0x46, 0xfd, 0xc8, 0xd2, 0x3f, 0x73, 
0x0e, 0xc8, 0x46, 0xd7, 0x6a, 0x93, 0x5d, 0x0e, 0x31, 0x24, - 0x12, 0xdd, 0x83, 0x51, 0x89, 0x50, 0x21, 0x98, 0xef, 0xd3, 0xd9, 0x15, 0x0b, 0x43, 0x41, 0xfa, - 0xaa, 0xbf, 0xf8, 0x09, 0xd5, 0x5f, 0xad, 0xc3, 0xd5, 0x21, 0x83, 0xd6, 0x47, 0x5f, 0xae, 0xfa, - 0xa8, 0x7f, 0xfc, 0x89, 0xf4, 0x67, 0xa1, 0x70, 0x44, 0xba, 0x82, 0x60, 0xdc, 0xe0, 0x4b, 0xb4, - 0x00, 0x23, 0xc7, 0xb8, 0xdd, 0x91, 0x5e, 0x18, 0x37, 0xe4, 0x47, 0xf5, 0x73, 0x98, 0xde, 0x21, - 0x21, 0x75, 0x2c, 0x16, 0xcd, 0x2e, 0x77, 0x81, 0xbf, 0xac, 0x2e, 0x6f, 0xf0, 0x5c, 0x6c, 0x86, - 0xe4, 0x24, 0x54, 0x3c, 0xbc, 0x19, 0xb8, 0x0a, 0xbe, 0x47, 0x4e, 0x42, 0xb4, 0x04, 0xbc, 0xa4, - 0x4d, 0x0f, 0xbb, 0x11, 0xed, 0x58, 0xe0, 0xdb, 0xcf, 0xb0, 0x4b, 0xea, 0xff, 0x7d, 0xfb, 0xae, - 0x92, 0x3b, 0x7d, 0x57, 0xc9, 0x7d, 0x78, 0x57, 0xd1, 0xbe, 0xe9, 0x55, 0xb4, 0x1f, 0x7a, 0x15, - 0xed, 0x97, 0x5e, 0x45, 0x7b, 0xdb, 0xab, 0x68, 0xbf, 0xf6, 0x2a, 0xda, 0x6f, 0xbd, 0x4a, 0xee, - 0x43, 0xaf, 0xa2, 0x7d, 0xfb, 0xbe, 0x92, 0x7b, 0xfb, 0xbe, 0x92, 0x3b, 0x7d, 0x5f, 0xc9, 0xed, - 0x43, 0xf2, 0x8f, 0x80, 0xe6, 0xa8, 0xf8, 0xb1, 0xf9, 0xe0, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x5c, 0x2e, 0x01, 0x20, 0x31, 0x10, 0x00, 0x00, + // 1680 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4d, 0x53, 0x1b, 0xcd, + 0x11, 0xd6, 0x4a, 0x02, 0x44, 0xf3, 0x3d, 0x80, 0x11, 0x18, 0x0b, 0xa2, 0x94, 0x63, 0x6c, 0xc7, + 0xab, 0x04, 0x27, 0x31, 0x55, 0x29, 0xbb, 0x62, 0xa1, 0xc4, 0x40, 0x82, 0xcb, 0x5e, 0xb0, 0x5d, + 0x45, 0x55, 0x6a, 0x33, 0xda, 0x1d, 0x89, 0x0d, 0xda, 0x0f, 0xcf, 0xae, 0x08, 0x72, 0x2e, 0x39, + 0xe6, 0x98, 0x43, 0x4e, 0xf9, 0x05, 0x39, 0xe7, 0x9e, 0x73, 0x72, 0xf4, 0x91, 0xaa, 0x54, 0x51, + 0xb1, 0x7c, 0x49, 0xe5, 0xbd, 0xf8, 0x27, 0xbc, 0x35, 0x1f, 0xfb, 0x21, 0x76, 0x05, 0xf8, 0xfa, + 0x9e, 0x98, 0xed, 0x79, 0xfa, 0xe9, 0x99, 0xa7, 0xa7, 0x67, 0x5a, 0xc0, 0x23, 0x9f, 0x1a, 0xb5, + 0x53, 0xeb, 0x83, 0x45, 0x68, 0xcd, 0x26, 0xbe, 0x8f, 0xdb, 0xc4, 0x8f, 
0x06, 0x5e, 0x33, 0x1a, + 0xaa, 0x1e, 0x75, 0x03, 0x17, 0x21, 0xef, 0x4c, 0x15, 0x68, 0x35, 0x9c, 0x59, 0x59, 0x68, 0xbb, + 0x6d, 0x97, 0x4f, 0xd7, 0xd8, 0x48, 0x20, 0x57, 0xd6, 0x18, 0x31, 0xf6, 0xac, 0x9a, 0x98, 0xe9, + 0x76, 0x2d, 0xd3, 0x6b, 0xf2, 0x3f, 0x12, 0xf0, 0x84, 0x01, 0x0c, 0x4c, 0x1d, 0x37, 0xa8, 0x79, + 0x1d, 0xec, 0x38, 0x84, 0xd6, 0x4c, 0xcb, 0x0f, 0xa8, 0xd5, 0xec, 0x06, 0x84, 0x81, 0x13, 0x5f, + 0x3a, 0x43, 0x48, 0xc7, 0xa7, 0x59, 0x8e, 0x3d, 0x07, 0xdb, 0x96, 0xa1, 0x07, 0x14, 0x1b, 0x96, + 0xd3, 0xae, 0x59, 0xb4, 0xd6, 0x71, 0xdb, 0x96, 0x81, 0x3b, 0x5e, 0x33, 0x1c, 0x49, 0xf7, 0x5a, + 0x86, 0x7b, 0xcb, 0xea, 0x10, 0xdd, 0x77, 0xbb, 0xd4, 0x20, 0x09, 0x57, 0xe9, 0xb0, 0x7a, 0xc9, + 0xc1, 0x6b, 0xd6, 0x12, 0xab, 0xb9, 0xcb, 0x67, 0x5d, 0xdb, 0x76, 0x9d, 0x5a, 0x13, 0xfb, 0xa4, + 0xe6, 0x07, 0x38, 0xe8, 0x32, 0xe9, 0xc4, 0x40, 0xc2, 0x36, 0x18, 0xcc, 0x3f, 0xc6, 0x94, 0x98, + 0xb5, 0x93, 0x2d, 0x26, 0x71, 0x80, 0x4d, 0x1c, 0x60, 0x2e, 0xb1, 0x18, 0x4a, 0xe4, 0x8f, 0x12, + 0x19, 0xf1, 0x09, 0x3d, 0xb5, 0x0c, 0x12, 0xc3, 0x6b, 0x7e, 0xe0, 0x52, 0xc2, 0xc9, 0x5d, 0x4a, + 0xa4, 0x87, 0x9a, 0xe5, 0x21, 0x63, 0xe1, 0x36, 0x71, 0x02, 0xaf, 0x29, 0xfe, 0x0a, 0x7c, 0xf5, + 0xcf, 0x63, 0x30, 0xf5, 0x96, 0xc3, 0xf7, 0x45, 0x0e, 0xd1, 0xef, 0xe0, 0x16, 0x25, 0x6d, 0xcb, + 0x0f, 0x08, 0xd5, 0x39, 0x52, 0xa7, 0xe4, 0x7d, 0x97, 0xf8, 0x41, 0x59, 0x59, 0x57, 0x36, 0x26, + 0x36, 0x37, 0xd4, 0x74, 0xde, 0x55, 0x4d, 0x7a, 0x3c, 0x67, 0x0e, 0x9a, 0xc0, 0xef, 0xe4, 0xb4, + 0x05, 0x9a, 0x61, 0x47, 0x06, 0x2c, 0xa5, 0x22, 0xf8, 0x9e, 0xeb, 0xf8, 0xa4, 0x9c, 0xe7, 0x21, + 0xee, 0xdf, 0x20, 0x84, 0x70, 0xd8, 0xc9, 0x69, 0x8b, 0x34, 0x6b, 0x02, 0x3d, 0x85, 0xf1, 0x63, + 0x82, 0x69, 0xd0, 0x24, 0x38, 0x28, 0x8f, 0x70, 0xda, 0x3b, 0x59, 0xb4, 0x3b, 0x21, 0x68, 0x27, + 0xa7, 0xc5, 0x1e, 0xe8, 0x05, 0x4c, 0x45, 0x1f, 0x3a, 0x36, 0x4e, 0xca, 0xa3, 0x9c, 0x62, 0xfd, + 0x4a, 0x8a, 0xe7, 0xc6, 0xc9, 0x4e, 0x4e, 0x9b, 0x3c, 0x4e, 0x7c, 0xa3, 0x3d, 0x98, 0x8e, 0x89, + 0x1c, 0xc6, 
0x34, 0xc6, 0x99, 0xbe, 0x77, 0x25, 0xd3, 0x4b, 0xcc, 0xa9, 0xe2, 0x35, 0x30, 0x03, + 0xfa, 0x2d, 0x2c, 0x92, 0x33, 0x62, 0x74, 0x03, 0xa2, 0xbf, 0xef, 0x12, 0xda, 0x8b, 0x32, 0x53, + 0xe2, 0x94, 0xf7, 0xb2, 0x28, 0x7f, 0x29, 0x1c, 0x5e, 0x33, 0x7c, 0x9c, 0x98, 0x79, 0x92, 0x36, + 0xa3, 0xb7, 0x80, 0x58, 0xcd, 0x10, 0xcf, 0xb5, 0x9c, 0x40, 0x97, 0x0c, 0x65, 0xe0, 0xdc, 0x77, + 0xb3, 0xb8, 0x0f, 0x23, 0xb4, 0x3c, 0x3c, 0x3b, 0x39, 0x6d, 0x2e, 0xb8, 0x6c, 0x64, 0xcb, 0x36, + 0x5c, 0xa7, 0x65, 0xb5, 0xf5, 0xae, 0x67, 0xe2, 0x80, 0x44, 0xd4, 0x13, 0xc3, 0x97, 0xbd, 0xcd, + 0x1d, 0xde, 0x70, 0x7c, 0x4c, 0x3e, 0x6f, 0xa4, 0xcd, 0xe8, 0x08, 0x16, 0x4e, 0xb6, 0x7c, 0x3d, + 0x2c, 0x8b, 0x88, 0x7d, 0x92, 0xb3, 0xff, 0x20, 0x8b, 0xfd, 0xd7, 0x5b, 0xfe, 0xbe, 0x84, 0xc7, + 0xe4, 0xe8, 0x24, 0x65, 0x45, 0xef, 0x60, 0x3e, 0x71, 0x1f, 0x44, 0xd4, 0x53, 0xc3, 0x35, 0xf9, + 0x95, 0xd5, 0x21, 0x07, 0x1c, 0x9d, 0xd0, 0xa4, 0x75, 0xd9, 0x58, 0x1f, 0x81, 0x82, 0xed, 0xb7, + 0xf7, 0x8a, 0xa5, 0xc2, 0x6c, 0x71, 0xaf, 0x58, 0x2a, 0xce, 0x8e, 0x54, 0xcf, 0xf3, 0x30, 0x97, + 0x52, 0x94, 0x95, 0x63, 0x22, 0x29, 0x96, 0xd3, 0x72, 0xa5, 0x8a, 0x57, 0x95, 0x63, 0x4c, 0xb3, + 0xeb, 0xb4, 0x5c, 0x21, 0x17, 0x2b, 0xc7, 0x20, 0xc3, 0x8e, 0x2c, 0x58, 0xa6, 0xc4, 0x76, 0x4f, + 0x89, 0x9e, 0x08, 0x14, 0x9e, 0x2c, 0x51, 0x90, 0x0f, 0xb3, 0x0b, 0x92, 0x39, 0xc5, 0xa1, 0xe2, + 0xd3, 0xb5, 0x44, 0xb3, 0xa7, 0x90, 0x0b, 0xb7, 0xa3, 0xca, 0xcf, 0x08, 0x56, 0xe0, 0xc1, 0x1e, + 0x5d, 0x55, 0xfd, 0x59, 0xe1, 0x96, 0xe9, 0xb0, 0x49, 0x29, 0x73, 0xf5, 0x3f, 0x79, 0x98, 0x4b, + 0x25, 0x06, 0x61, 0x58, 0x4a, 0x26, 0xf7, 0x86, 0xda, 0xc6, 0x3c, 0x83, 0xda, 0xb6, 0x32, 0xec, + 0xe8, 0xf7, 0xb0, 0x22, 0xb5, 0x4d, 0x46, 0xba, 0xb1, 0xb8, 0x71, 0xac, 0x94, 0xb8, 0xa9, 0x29, + 0xe4, 0xc1, 0x6a, 0x24, 0x6e, 0x56, 0xb4, 0x1b, 0xa8, 0x9b, 0x15, 0x2f, 0x52, 0x37, 0x35, 0x19, + 0xaa, 0xfb, 0x47, 0x98, 0xcf, 0x28, 0xd7, 0x74, 0xd9, 0x0f, 0xbe, 0x23, 0xd7, 0x96, 0x7d, 0xe2, + 0xb6, 0x32, 0xd2, 0xe6, 0x30, 0xf8, 0xff, 0xf3, 
0x80, 0xd2, 0xe5, 0x8c, 0x8e, 0x60, 0x7e, 0xe0, + 0x52, 0x48, 0xe7, 0x55, 0x3c, 0x8a, 0xea, 0xc9, 0x96, 0xaf, 0xc6, 0x0f, 0xb0, 0xaa, 0x11, 0xa1, + 0x5a, 0x94, 0xd7, 0xb9, 0xc4, 0xad, 0x20, 0x93, 0x7a, 0x0a, 0xab, 0xb6, 0xe5, 0xfb, 0x96, 0xd3, + 0xd6, 0x07, 0x62, 0x0c, 0xa6, 0xf5, 0xf1, 0xf0, 0x20, 0xfb, 0xc2, 0x3b, 0xb1, 0xec, 0x84, 0xdc, + 0xf6, 0xb0, 0x49, 0xd4, 0x83, 0x3b, 0x43, 0xe2, 0xca, 0xd7, 0x53, 0x64, 0xf8, 0x27, 0x5f, 0x17, + 0x38, 0x7a, 0x48, 0x57, 0xec, 0xa1, 0xb3, 0xa1, 0xd8, 0x1f, 0x60, 0x21, 0xeb, 0xa5, 0x47, 0xcf, + 0xa0, 0xc8, 0xaa, 0x47, 0xca, 0xfb, 0x20, 0x91, 0xd9, 0xb0, 0x07, 0x09, 0x17, 0x24, 0x7a, 0x0f, + 0xee, 0xcc, 0xca, 0x44, 0xe3, 0x7e, 0x68, 0x15, 0x8a, 0xd8, 0xb7, 0x4c, 0xbe, 0x81, 0xa9, 0x7a, + 0xa9, 0x7f, 0xb1, 0x56, 0x7c, 0x7e, 0xb0, 0xdb, 0xd0, 0xb8, 0x75, 0xaf, 0x58, 0xca, 0xcf, 0x16, + 0xaa, 0x3f, 0x87, 0xc5, 0xcc, 0x16, 0x20, 0x72, 0x56, 0xae, 0x70, 0x36, 0x60, 0x8a, 0x3b, 0x35, + 0x70, 0x80, 0x59, 0x5c, 0xa4, 0xc1, 0x54, 0xa4, 0x5f, 0x62, 0xe9, 0xbc, 0x3a, 0x44, 0x7f, 0xa7, + 0xca, 0x86, 0x50, 0x1d, 0x68, 0x44, 0xd5, 0x50, 0x1a, 0xbe, 0xfa, 0x49, 0x3b, 0xf1, 0x55, 0xfd, + 0x26, 0x0f, 0x33, 0x3c, 0x8a, 0x38, 0x27, 0x3c, 0xce, 0x33, 0x18, 0xf5, 0x8d, 0x63, 0x62, 0xe3, + 0x72, 0x7e, 0xbd, 0x70, 0xe9, 0x39, 0x8a, 0xb4, 0x89, 0xba, 0xbe, 0x43, 0xdc, 0xec, 0x70, 0x3f, + 0x4d, 0x7a, 0xa1, 0xd7, 0x30, 0xe3, 0x51, 0xd7, 0x20, 0xbe, 0xaf, 0x1b, 0x94, 0xe0, 0x80, 0x98, + 0xe5, 0x22, 0x27, 0xba, 0xe2, 0x0c, 0xbf, 0x12, 0x0e, 0xdb, 0x02, 0xaf, 0x4d, 0x7b, 0x03, 0xdf, + 0xe8, 0x08, 0x50, 0x48, 0x19, 0x10, 0x6a, 0x5b, 0x0e, 0x67, 0x1d, 0xe1, 0xac, 0x0f, 0xaf, 0x65, + 0x3d, 0x8c, 0x5c, 0xb4, 0x39, 0xef, 0xb2, 0x09, 0xfd, 0x10, 0x90, 0xe9, 0x12, 0x3f, 0xac, 0x78, + 0xb9, 0x75, 0xd6, 0x3b, 0x95, 0xb4, 0x59, 0x36, 0x23, 0xa4, 0x39, 0x10, 0x9b, 0xfb, 0x29, 0x14, + 0x19, 0xf9, 0x55, 0x1d, 0xd1, 0x40, 0xd6, 0x34, 0x0e, 0x17, 0x8f, 0x66, 0xf5, 0x5f, 0x0a, 0x8c, + 0x47, 0xfd, 0x12, 0x7a, 0x02, 0x25, 0xd1, 0x4a, 0xca, 0x83, 0x30, 0xb1, 0x39, 0xc3, 
0xe8, 0xc4, + 0x4f, 0x0d, 0xf5, 0xcd, 0x9b, 0xdd, 0x46, 0x7d, 0xa2, 0x7f, 0xb1, 0x36, 0x26, 0x4e, 0x5e, 0x43, + 0x1b, 0xe3, 0xe8, 0x5d, 0x13, 0x21, 0x28, 0x06, 0x96, 0x2d, 0x3a, 0xcf, 0x82, 0xc6, 0xc7, 0xa8, + 0x01, 0x13, 0x72, 0x03, 0xfc, 0x68, 0x88, 0xb2, 0xfa, 0xfe, 0xd0, 0xe5, 0xc5, 0xe9, 0xd6, 0xa0, + 0x1b, 0xa7, 0xfe, 0x1e, 0xcc, 0xf8, 0xac, 0x3e, 0x1c, 0x83, 0xe8, 0x4e, 0xd7, 0x6e, 0x12, 0x5a, + 0x2e, 0xf2, 0x20, 0xd3, 0xa1, 0xf9, 0x25, 0xb7, 0x56, 0x7b, 0x80, 0x06, 0x6f, 0x18, 0xee, 0xbe, + 0x09, 0x93, 0xf2, 0x80, 0xe8, 0x86, 0x65, 0x52, 0xbe, 0xc0, 0xf1, 0xfa, 0x4c, 0xff, 0x62, 0x6d, + 0xe2, 0x40, 0xd8, 0xb7, 0x77, 0x1b, 0x9a, 0x36, 0x21, 0x41, 0xdb, 0x96, 0x49, 0xd1, 0x7d, 0x18, + 0xf7, 0x5c, 0x93, 0xe3, 0xfd, 0x72, 0x61, 0xbd, 0xb0, 0x31, 0x5e, 0x9f, 0xec, 0x5f, 0xac, 0x95, + 0x5e, 0xb9, 0x26, 0x03, 0xfb, 0x5a, 0xc9, 0x73, 0x4d, 0x86, 0xf4, 0xf7, 0x8a, 0x25, 0x65, 0x36, + 0x5f, 0xfd, 0xab, 0x02, 0x93, 0xc9, 0xf6, 0x35, 0x92, 0x43, 0x49, 0xc8, 0x91, 0xb1, 0x91, 0x7c, + 0xd6, 0x46, 0xd0, 0x8b, 0x2c, 0xdd, 0x32, 0x1b, 0xb0, 0xf4, 0x7e, 0x93, 0xd2, 0x55, 0x6b, 0x30, + 0x35, 0xd0, 0x0a, 0xa3, 0x0a, 0x00, 0x25, 0xe1, 0x43, 0xc4, 0x17, 0x57, 0xd2, 0x12, 0x96, 0xea, + 0xdf, 0x14, 0x98, 0xcf, 0xe8, 0x74, 0xd9, 0xb1, 0x10, 0x9d, 0xf2, 0x35, 0xc7, 0x82, 0x3b, 0xb1, + 0x63, 0xc1, 0xd1, 0xbb, 0x26, 0x7a, 0x00, 0x45, 0x56, 0xff, 0x72, 0x0f, 0xb7, 0x2e, 0x5d, 0x0b, + 0xac, 0x1c, 0x3a, 0xd8, 0xd1, 0x38, 0x06, 0x95, 0x61, 0x0c, 0x3b, 0xb8, 0xd3, 0xfb, 0x40, 0x78, + 0x82, 0x4b, 0x5a, 0xf8, 0x29, 0x2f, 0x9f, 0x7f, 0x2a, 0xb0, 0x3c, 0xb4, 0x7f, 0x41, 0x7f, 0x80, + 0xc5, 0x44, 0x2b, 0x64, 0x12, 0xaf, 0xe3, 0xf6, 0x6c, 0xe2, 0x84, 0xcf, 0x64, 0x3d, 0xeb, 0x46, + 0x1a, 0xfc, 0x85, 0xab, 0x5a, 0x54, 0x0d, 0x7f, 0xa6, 0xc6, 0xfc, 0x8d, 0x88, 0x29, 0xd9, 0xf7, + 0xc5, 0x56, 0x74, 0x0f, 0xf2, 0x96, 0x29, 0x1f, 0xab, 0x94, 0x2a, 0xa3, 0xfd, 0x8b, 0xb5, 0xfc, + 0x6e, 0x43, 0xcb, 0x5b, 0x66, 0xf5, 0x5c, 0x81, 0x85, 0xac, 0x8e, 0x52, 0x32, 0x28, 0xd7, 0x32, + 0xa0, 0x1f, 0xc3, 0x08, 
0xfb, 0x05, 0x2c, 0xaa, 0x6c, 0x7a, 0xf3, 0x36, 0xbf, 0x65, 0xe4, 0x6f, + 0x63, 0xf5, 0x37, 0x56, 0x8b, 0x6c, 0xf7, 0x8c, 0x0e, 0x39, 0x60, 0x10, 0x4d, 0x20, 0xd1, 0x43, + 0x18, 0x15, 0x08, 0x99, 0x82, 0xf9, 0x01, 0x9f, 0x03, 0x3e, 0xd0, 0x24, 0x64, 0xa0, 0xfa, 0x8b, + 0x5f, 0x51, 0xfd, 0xd5, 0x3a, 0x2c, 0x0d, 0x69, 0x63, 0x6f, 0xbc, 0xb9, 0xea, 0x3f, 0x12, 0xe9, + 0x4d, 0x77, 0x65, 0x1d, 0xb8, 0x95, 0x6c, 0xc6, 0x52, 0xf9, 0xfd, 0x59, 0x46, 0x7e, 0x13, 0x0e, + 0x2c, 0xb7, 0x31, 0x69, 0x32, 0xa7, 0xad, 0x0c, 0xeb, 0xd7, 0xe5, 0x34, 0xab, 0x93, 0xfd, 0x2e, + 0xe5, 0x34, 0x9d, 0x8c, 0x1b, 0xe7, 0xf4, 0xe9, 0x60, 0x4b, 0x1b, 0xfa, 0xcf, 0x42, 0xe1, 0x84, + 0xf4, 0x38, 0xc1, 0xb8, 0xc6, 0x86, 0x68, 0x01, 0x46, 0x4e, 0x71, 0xa7, 0x2b, 0x54, 0x18, 0xd7, + 0xc4, 0x47, 0xf5, 0x1d, 0x4c, 0xef, 0x93, 0x80, 0x5a, 0x86, 0x1f, 0xf6, 0xa3, 0x0f, 0x80, 0xbd, + 0x96, 0x36, 0x6b, 0xda, 0x98, 0x59, 0x0f, 0xc8, 0x59, 0x20, 0x79, 0xd8, 0x03, 0x6f, 0x4b, 0xf8, + 0x21, 0x39, 0x0b, 0xd0, 0x32, 0xb0, 0x6b, 0x5a, 0x77, 0xb0, 0x1d, 0xd2, 0x8e, 0x79, 0xae, 0xf9, + 0x12, 0xdb, 0xa4, 0xfe, 0x8b, 0x8f, 0x9f, 0x2a, 0xb9, 0xf3, 0x4f, 0x95, 0xdc, 0x97, 0x4f, 0x15, + 0xe5, 0x4f, 0xfd, 0x8a, 0xf2, 0xf7, 0x7e, 0x45, 0xf9, 0x77, 0xbf, 0xa2, 0x7c, 0xec, 0x57, 0x94, + 0xff, 0xf6, 0x2b, 0xca, 0xff, 0xfa, 0x95, 0xdc, 0x97, 0x7e, 0x45, 0xf9, 0xcb, 0xe7, 0x4a, 0xee, + 0xe3, 0xe7, 0x4a, 0xee, 0xfc, 0x73, 0x25, 0x77, 0x04, 0xf1, 0x3f, 0xf1, 0x9a, 0xa3, 0xfc, 0xff, + 0x3e, 0x8f, 0xbf, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xac, 0xf1, 0xdd, 0xed, 0x13, 0x00, 0x00, } func (this *VizierMessage) Equal(that interface{}) bool { @@ -1603,6 +1888,30 @@ func (this *VizierMessage_K8SMetadataMessage) Equal(that interface{}) bool { } return true } +func (this *VizierMessage_FileSourceMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VizierMessage_FileSourceMessage) + if !ok { + that2, ok := that.(VizierMessage_FileSourceMessage) + if ok { + that1 = &that2 + } else { 
+ return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.FileSourceMessage.Equal(that1.FileSourceMessage) { + return false + } + return true +} func (this *TracepointMessage) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1705,14 +2014,14 @@ func (this *TracepointMessage_RegisterTracepointRequest) Equal(that interface{}) } return true } -func (this *ConfigUpdateMessage) Equal(that interface{}) bool { +func (this *FileSourceMessage) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*ConfigUpdateMessage) + that1, ok := that.(*FileSourceMessage) if !ok { - that2, ok := that.(ConfigUpdateMessage) + that2, ok := that.(FileSourceMessage) if ok { that1 = &that2 } else { @@ -1735,14 +2044,14 @@ func (this *ConfigUpdateMessage) Equal(that interface{}) bool { } return true } -func (this *ConfigUpdateMessage_ConfigUpdateRequest) Equal(that interface{}) bool { +func (this *FileSourceMessage_FileSourceInfoUpdate) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*ConfigUpdateMessage_ConfigUpdateRequest) + that1, ok := that.(*FileSourceMessage_FileSourceInfoUpdate) if !ok { - that2, ok := that.(ConfigUpdateMessage_ConfigUpdateRequest) + that2, ok := that.(FileSourceMessage_FileSourceInfoUpdate) if ok { that1 = &that2 } else { @@ -1754,19 +2063,19 @@ func (this *ConfigUpdateMessage_ConfigUpdateRequest) Equal(that interface{}) boo } else if this == nil { return false } - if !this.ConfigUpdateRequest.Equal(that1.ConfigUpdateRequest) { + if !this.FileSourceInfoUpdate.Equal(that1.FileSourceInfoUpdate) { return false } return true } -func (this *K8SMetadataMessage) Equal(that interface{}) bool { +func (this *FileSourceMessage_RemoveFileSourceRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*K8SMetadataMessage) + that1, ok := 
that.(*FileSourceMessage_RemoveFileSourceRequest) if !ok { - that2, ok := that.(K8SMetadataMessage) + that2, ok := that.(FileSourceMessage_RemoveFileSourceRequest) if ok { that1 = &that2 } else { @@ -1778,25 +2087,19 @@ func (this *K8SMetadataMessage) Equal(that interface{}) bool { } else if this == nil { return false } - if that1.Msg == nil { - if this.Msg != nil { - return false - } - } else if this.Msg == nil { - return false - } else if !this.Msg.Equal(that1.Msg) { + if !this.RemoveFileSourceRequest.Equal(that1.RemoveFileSourceRequest) { return false } return true } -func (this *K8SMetadataMessage_K8SMetadataUpdate) Equal(that interface{}) bool { +func (this *FileSourceMessage_RegisterFileSourceRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*K8SMetadataMessage_K8SMetadataUpdate) + that1, ok := that.(*FileSourceMessage_RegisterFileSourceRequest) if !ok { - that2, ok := that.(K8SMetadataMessage_K8SMetadataUpdate) + that2, ok := that.(FileSourceMessage_RegisterFileSourceRequest) if ok { that1 = &that2 } else { @@ -1808,19 +2111,19 @@ func (this *K8SMetadataMessage_K8SMetadataUpdate) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.K8SMetadataUpdate.Equal(that1.K8SMetadataUpdate) { + if !this.RegisterFileSourceRequest.Equal(that1.RegisterFileSourceRequest) { return false } return true } -func (this *K8SMetadataMessage_MissingK8SMetadataRequest) Equal(that interface{}) bool { +func (this *ConfigUpdateMessage) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*K8SMetadataMessage_MissingK8SMetadataRequest) + that1, ok := that.(*ConfigUpdateMessage) if !ok { - that2, ok := that.(K8SMetadataMessage_MissingK8SMetadataRequest) + that2, ok := that.(ConfigUpdateMessage) if ok { that1 = &that2 } else { @@ -1832,7 +2135,115 @@ func (this *K8SMetadataMessage_MissingK8SMetadataRequest) Equal(that interface{} } else if this == nil { return false } - if 
!this.MissingK8SMetadataRequest.Equal(that1.MissingK8SMetadataRequest) { + if that1.Msg == nil { + if this.Msg != nil { + return false + } + } else if this.Msg == nil { + return false + } else if !this.Msg.Equal(that1.Msg) { + return false + } + return true +} +func (this *ConfigUpdateMessage_ConfigUpdateRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ConfigUpdateMessage_ConfigUpdateRequest) + if !ok { + that2, ok := that.(ConfigUpdateMessage_ConfigUpdateRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ConfigUpdateRequest.Equal(that1.ConfigUpdateRequest) { + return false + } + return true +} +func (this *K8SMetadataMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*K8SMetadataMessage) + if !ok { + that2, ok := that.(K8SMetadataMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Msg == nil { + if this.Msg != nil { + return false + } + } else if this.Msg == nil { + return false + } else if !this.Msg.Equal(that1.Msg) { + return false + } + return true +} +func (this *K8SMetadataMessage_K8SMetadataUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*K8SMetadataMessage_K8SMetadataUpdate) + if !ok { + that2, ok := that.(K8SMetadataMessage_K8SMetadataUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.K8SMetadataUpdate.Equal(that1.K8SMetadataUpdate) { + return false + } + return true +} +func (this *K8SMetadataMessage_MissingK8SMetadataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := 
that.(*K8SMetadataMessage_MissingK8SMetadataRequest) + if !ok { + that2, ok := that.(K8SMetadataMessage_MissingK8SMetadataRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.MissingK8SMetadataRequest.Equal(that1.MissingK8SMetadataRequest) { return false } return true @@ -2220,6 +2631,90 @@ func (this *RemoveTracepointRequest) Equal(that interface{}) bool { } return true } +func (this *RegisterFileSourceRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RegisterFileSourceRequest) + if !ok { + that2, ok := that.(RegisterFileSourceRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.FileSourceDeployment.Equal(that1.FileSourceDeployment) { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + return true +} +func (this *FileSourceInfoUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FileSourceInfoUpdate) + if !ok { + that2, ok := that.(FileSourceInfoUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + if this.State != that1.State { + return false + } + if !this.Status.Equal(that1.Status) { + return false + } + if !this.AgentID.Equal(that1.AgentID) { + return false + } + return true +} +func (this *RemoveFileSourceRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RemoveFileSourceRequest) + if !ok { + that2, ok := that.(RemoveFileSourceRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
!this.ID.Equal(that1.ID) { + return false + } + return true +} func (this *ConfigUpdateRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2278,7 +2773,7 @@ func (this *VizierMessage) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 13) + s := make([]string, 0, 14) s = append(s, "&messagespb.VizierMessage{") if this.Msg != nil { s = append(s, "Msg: "+fmt.Sprintf("%#v", this.Msg)+",\n") @@ -2358,6 +2853,14 @@ func (this *VizierMessage_K8SMetadataMessage) GoString() string { `K8SMetadataMessage:` + fmt.Sprintf("%#v", this.K8SMetadataMessage) + `}`}, ", ") return s } +func (this *VizierMessage_FileSourceMessage) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&messagespb.VizierMessage_FileSourceMessage{` + + `FileSourceMessage:` + fmt.Sprintf("%#v", this.FileSourceMessage) + `}`}, ", ") + return s +} func (this *TracepointMessage) GoString() string { if this == nil { return "nil" @@ -2394,6 +2897,42 @@ func (this *TracepointMessage_RegisterTracepointRequest) GoString() string { `RegisterTracepointRequest:` + fmt.Sprintf("%#v", this.RegisterTracepointRequest) + `}`}, ", ") return s } +func (this *FileSourceMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&messagespb.FileSourceMessage{") + if this.Msg != nil { + s = append(s, "Msg: "+fmt.Sprintf("%#v", this.Msg)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileSourceMessage_FileSourceInfoUpdate) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&messagespb.FileSourceMessage_FileSourceInfoUpdate{` + + `FileSourceInfoUpdate:` + fmt.Sprintf("%#v", this.FileSourceInfoUpdate) + `}`}, ", ") + return s +} +func (this *FileSourceMessage_RemoveFileSourceRequest) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&messagespb.FileSourceMessage_RemoveFileSourceRequest{` 
+ + `RemoveFileSourceRequest:` + fmt.Sprintf("%#v", this.RemoveFileSourceRequest) + `}`}, ", ") + return s +} +func (this *FileSourceMessage_RegisterFileSourceRequest) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&messagespb.FileSourceMessage_RegisterFileSourceRequest{` + + `RegisterFileSourceRequest:` + fmt.Sprintf("%#v", this.RegisterFileSourceRequest) + `}`}, ", ") + return s +} func (this *ConfigUpdateMessage) GoString() string { if this == nil { return "nil" @@ -2621,6 +3160,52 @@ func (this *RemoveTracepointRequest) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *RegisterFileSourceRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&messagespb.RegisterFileSourceRequest{") + if this.FileSourceDeployment != nil { + s = append(s, "FileSourceDeployment: "+fmt.Sprintf("%#v", this.FileSourceDeployment)+",\n") + } + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileSourceInfoUpdate) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&messagespb.FileSourceInfoUpdate{") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RemoveFileSourceRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&messagespb.RemoveFileSourceRequest{") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this 
*ConfigUpdateRequest) GoString() string { if this == nil { return "nil" @@ -2872,22 +3457,43 @@ func (m *VizierMessage_K8SMetadataMessage) MarshalToSizedBuffer(dAtA []byte) (in } return len(dAtA) - i, nil } -func (m *TracepointMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TracepointMessage) MarshalTo(dAtA []byte) (int, error) { +func (m *VizierMessage_FileSourceMessage) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TracepointMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *VizierMessage_FileSourceMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FileSourceMessage != nil { + { + size, err := m.FileSourceMessage.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *TracepointMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracepointMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TracepointMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2967,6 +3573,101 @@ func (m *TracepointMessage_RegisterTracepointRequest) MarshalToSizedBuffer(dAtA } return len(dAtA) - i, nil } +func (m *FileSourceMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileSourceMessage) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileSourceMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Msg != nil { + { + size := m.Msg.Size() + i -= size + if _, err := m.Msg.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *FileSourceMessage_FileSourceInfoUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileSourceMessage_FileSourceInfoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FileSourceInfoUpdate != nil { + { + size, err := m.FileSourceInfoUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *FileSourceMessage_RemoveFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileSourceMessage_RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RemoveFileSourceRequest != nil { + { + size, err := m.RemoveFileSourceRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *FileSourceMessage_RegisterFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileSourceMessage_RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RegisterFileSourceRequest != nil { + { + size, err := m.RegisterFileSourceRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0x1a + } + return len(dAtA) - i, nil +} func (m *ConfigUpdateMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3682,6 +4383,152 @@ func (m *RemoveTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *RegisterFileSourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegisterFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FileSourceDeployment != nil { + { + size, err := m.FileSourceDeployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileSourceInfoUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileSourceInfoUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileSourceInfoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AgentID != nil { + { + size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if 
m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveFileSourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessages(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ConfigUpdateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3887,6 +4734,18 @@ func (m *VizierMessage_K8SMetadataMessage) Size() (n int) { } return n } +func (m *VizierMessage_FileSourceMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FileSourceMessage != nil { + l = m.FileSourceMessage.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} func (m *TracepointMessage) Size() (n int) { if m == nil { return 0 @@ -3935,7 +4794,7 @@ func (m *TracepointMessage_RegisterTracepointRequest) Size() (n int) { } return n } -func (m *ConfigUpdateMessage) Size() (n int) { +func (m *FileSourceMessage) Size() (n int) { if 
m == nil { return 0 } @@ -3947,14 +4806,62 @@ func (m *ConfigUpdateMessage) Size() (n int) { return n } -func (m *ConfigUpdateMessage_ConfigUpdateRequest) Size() (n int) { +func (m *FileSourceMessage_FileSourceInfoUpdate) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.ConfigUpdateRequest != nil { - l = m.ConfigUpdateRequest.Size() + if m.FileSourceInfoUpdate != nil { + l = m.FileSourceInfoUpdate.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} +func (m *FileSourceMessage_RemoveFileSourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RemoveFileSourceRequest != nil { + l = m.RemoveFileSourceRequest.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} +func (m *FileSourceMessage_RegisterFileSourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RegisterFileSourceRequest != nil { + l = m.RegisterFileSourceRequest.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} +func (m *ConfigUpdateMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Msg != nil { + n += m.Msg.Size() + } + return n +} + +func (m *ConfigUpdateMessage_ConfigUpdateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigUpdateRequest != nil { + l = m.ConfigUpdateRequest.Size() n += 1 + l + sovMessages(uint64(l)) } return n @@ -4229,6 +5136,60 @@ func (m *RemoveTracepointRequest) Size() (n int) { return n } +func (m *RegisterFileSourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FileSourceDeployment != nil { + l = m.FileSourceDeployment.Size() + n += 1 + l + sovMessages(uint64(l)) + } + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *FileSourceInfoUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovMessages(uint64(l)) + } + if m.State != 0 { + n += 
1 + sovMessages(uint64(m.State)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovMessages(uint64(l)) + } + if m.AgentID != nil { + l = m.AgentID.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *RemoveFileSourceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + func (m *ConfigUpdateRequest) Size() (n int) { if m == nil { return 0 @@ -4369,6 +5330,16 @@ func (this *VizierMessage_K8SMetadataMessage) String() string { }, "") return s } +func (this *VizierMessage_FileSourceMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VizierMessage_FileSourceMessage{`, + `FileSourceMessage:` + strings.Replace(fmt.Sprintf("%v", this.FileSourceMessage), "FileSourceMessage", "FileSourceMessage", 1) + `,`, + `}`, + }, "") + return s +} func (this *TracepointMessage) String() string { if this == nil { return "nil" @@ -4409,6 +5380,46 @@ func (this *TracepointMessage_RegisterTracepointRequest) String() string { }, "") return s } +func (this *FileSourceMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceMessage{`, + `Msg:` + fmt.Sprintf("%v", this.Msg) + `,`, + `}`, + }, "") + return s +} +func (this *FileSourceMessage_FileSourceInfoUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceMessage_FileSourceInfoUpdate{`, + `FileSourceInfoUpdate:` + strings.Replace(fmt.Sprintf("%v", this.FileSourceInfoUpdate), "FileSourceInfoUpdate", "FileSourceInfoUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FileSourceMessage_RemoveFileSourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceMessage_RemoveFileSourceRequest{`, + `RemoveFileSourceRequest:` + strings.Replace(fmt.Sprintf("%v", this.RemoveFileSourceRequest), 
"RemoveFileSourceRequest", "RemoveFileSourceRequest", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FileSourceMessage_RegisterFileSourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceMessage_RegisterFileSourceRequest{`, + `RegisterFileSourceRequest:` + strings.Replace(fmt.Sprintf("%v", this.RegisterFileSourceRequest), "RegisterFileSourceRequest", "RegisterFileSourceRequest", 1) + `,`, + `}`, + }, "") + return s +} func (this *ConfigUpdateMessage) String() string { if this == nil { return "nil" @@ -4621,6 +5632,40 @@ func (this *RemoveTracepointRequest) String() string { }, "") return s } +func (this *RegisterFileSourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegisterFileSourceRequest{`, + `FileSourceDeployment:` + strings.Replace(fmt.Sprintf("%v", this.FileSourceDeployment), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FileSourceInfoUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceInfoUpdate{`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "UUID", "uuidpb.UUID", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveFileSourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveFileSourceRequest{`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `}`, + }, "") + return s +} func (this *ConfigUpdateRequest) String() string { if this == nil { return "nil" @@ -4995,6 +6040,41 @@ func (m *VizierMessage) 
Unmarshal(dAtA []byte) error { } m.Msg = &VizierMessage_K8SMetadataMessage{v} iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileSourceMessage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileSourceMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Msg = &VizierMessage_FileSourceMessage{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMessages(dAtA[iNdEx:]) @@ -5171,7 +6251,7 @@ func (m *TracepointMessage) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigUpdateMessage) Unmarshal(dAtA []byte) error { +func (m *FileSourceMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5194,15 +6274,15 @@ func (m *ConfigUpdateMessage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigUpdateMessage: wiretype end group for non-group") + return fmt.Errorf("proto: FileSourceMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigUpdateMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileSourceMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigUpdateRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FileSourceInfoUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ 
-5229,65 +6309,15 @@ func (m *ConfigUpdateMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ConfigUpdateRequest{} + v := &FileSourceInfoUpdate{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Msg = &ConfigUpdateMessage_ConfigUpdateRequest{v} + m.Msg = &FileSourceMessage_FileSourceInfoUpdate{v} iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *K8SMetadataMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: K8sMetadataMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: K8sMetadataMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field K8SMetadataUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RemoveFileSourceRequest", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5314,15 +6344,15 @@ func (m *K8SMetadataMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &metadatapb.ResourceUpdate{} + v := &RemoveFileSourceRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Msg = 
&K8SMetadataMessage_K8SMetadataUpdate{v} + m.Msg = &FileSourceMessage_RemoveFileSourceRequest{v} iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MissingK8SMetadataRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RegisterFileSourceRequest", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5349,17 +6379,222 @@ func (m *K8SMetadataMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &metadatapb.MissingK8SMetadataRequest{} + v := &RegisterFileSourceRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Msg = &K8SMetadataMessage_MissingK8SMetadataRequest{v} + m.Msg = &FileSourceMessage_RegisterFileSourceRequest{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MissingK8SMetadataResponse", wireType) - } - var msglen int + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigUpdateMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigUpdateMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigUpdateMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigUpdateRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ConfigUpdateRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Msg = &ConfigUpdateMessage_ConfigUpdateRequest{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *K8SMetadataMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: K8sMetadataMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: K8sMetadataMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field K8SMetadataUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &metadatapb.ResourceUpdate{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Msg = &K8SMetadataMessage_K8SMetadataUpdate{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MissingK8SMetadataRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &metadatapb.MissingK8SMetadataRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Msg = &K8SMetadataMessage_MissingK8SMetadataRequest{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MissingK8SMetadataResponse", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowMessages @@ -6874,6 +8109,391 @@ func (m *RemoveTracepointRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *RegisterFileSourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegisterFileSourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegisterFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileSourceDeployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FileSourceDeployment == nil { + m.FileSourceDeployment = &ir.FileSourceDeployment{} + } + if err := m.FileSourceDeployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileSourceInfoUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileSourceInfoUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileSourceInfoUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + m.State |= statuspb.LifeCycleState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &statuspb.Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &uuidpb.UUID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveFileSourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ConfigUpdateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/src/vizier/messages/messagespb/messages.proto b/src/vizier/messages/messagespb/messages.proto index 92bc4785084..32e61d92dba 100644 --- a/src/vizier/messages/messagespb/messages.proto +++ 
b/src/vizier/messages/messagespb/messages.proto @@ -26,6 +26,7 @@ import "gogoproto/gogo.proto"; import "src/api/proto/uuidpb/uuid.proto"; import "src/carnot/planner/distributedpb/distributed_plan.proto"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; +import "src/carnot/planner/file_source/ir/logical.proto"; import "src/carnot/planpb/plan.proto"; import "src/common/base/statuspb/status.proto"; import "src/shared/k8s/metadatapb/metadata.proto"; @@ -44,6 +45,7 @@ message VizierMessage { TracepointMessage tracepoint_message = 10; ConfigUpdateMessage config_update_message = 11; K8sMetadataMessage k8s_metadata_message = 12; + FileSourceMessage file_source_message = 13; } // DEPRECATED: Formerly used for UpdateAgentRequest. reserved 3; @@ -60,6 +62,15 @@ message TracepointMessage { } } +// A wrapper around all file source-related messages that can be sent over the message bus. +message FileSourceMessage { + oneof msg { + FileSourceInfoUpdate file_source_info_update = 1; + RemoveFileSourceRequest remove_file_source_request = 2; + RegisterFileSourceRequest register_file_source_request = 3; + } +} + // A wrapper around all PEM-config-related messages that can be sent over the message bus. message ConfigUpdateMessage { oneof msg { @@ -172,6 +183,27 @@ message RemoveTracepointRequest { uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; } +// The request to register file sources on a PEM. +message RegisterFileSourceRequest { + px.carnot.planner.file_source.ir.FileSourceDeployment file_source_deployment = 1; + uuidpb.UUID id = 2 [ (gogoproto.customname) = "ID" ]; +} + +// An update message sent when a file source's status changes. +message FileSourceInfoUpdate { + uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; + // The state of the file source. + px.statuspb.LifeCycleState state = 2; + // The status of the file source, specified if the state of the file source is not healthy. 
+ px.statuspb.Status status = 3; + // The ID of the agent sending the update. + uuidpb.UUID agent_id = 4 [ (gogoproto.customname) = "AgentID" ]; +} + +message RemoveFileSourceRequest { + uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; +} + // A request to update a config setting on a PEM. message ConfigUpdateRequest { // The key of the setting that should be updated. diff --git a/src/vizier/services/agent/kelvin/kelvin_manager.h b/src/vizier/services/agent/kelvin/kelvin_manager.h index 51b0c2fc993..2c2959736f4 100644 --- a/src/vizier/services/agent/kelvin/kelvin_manager.h +++ b/src/vizier/services/agent/kelvin/kelvin_manager.h @@ -60,6 +60,7 @@ class KelvinManager : public Manager { static services::shared::agent::AgentCapabilities Capabilities() { services::shared::agent::AgentCapabilities capabilities; capabilities.set_collects_data(false); + capabilities.set_stores_data(true); return capabilities; } diff --git a/src/vizier/services/agent/pem/file_source_manager.cc b/src/vizier/services/agent/pem/file_source_manager.cc new file mode 100644 index 00000000000..650ae2f85a6 --- /dev/null +++ b/src/vizier/services/agent/pem/file_source_manager.cc @@ -0,0 +1,234 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include "src/common/base/base.h" +#include "src/vizier/services/agent/pem/file_source_manager.h" + +constexpr auto kUpdateInterval = std::chrono::seconds(2); + +namespace px { +namespace vizier { +namespace agent { + +FileSourceManager::FileSourceManager(px::event::Dispatcher* dispatcher, Info* agent_info, + Manager::VizierNATSConnector* nats_conn, + stirling::Stirling* stirling, + table_store::TableStore* table_store, + RelationInfoManager* relation_info_manager) + : MessageHandler(dispatcher, agent_info, nats_conn), + dispatcher_(dispatcher), + nats_conn_(nats_conn), + stirling_(stirling), + table_store_(table_store), + relation_info_manager_(relation_info_manager) { + file_source_monitor_timer_ = + dispatcher_->CreateTimer(std::bind(&FileSourceManager::Monitor, this)); + // Kick off the background monitor. + file_source_monitor_timer_->EnableTimer(kUpdateInterval); +} + +Status FileSourceManager::HandleMessage(std::unique_ptr msg) { + // The main purpose of handle message is to update the local state based on updates + // from the MDS. 
+ if (!msg->has_file_source_message()) { + return error::InvalidArgument("Can only handle file source requests"); + } + + const messages::FileSourceMessage& file_source = msg->file_source_message(); + switch (file_source.msg_case()) { + case messages::FileSourceMessage::kRegisterFileSourceRequest: { + return HandleRegisterFileSourceRequest(file_source.register_file_source_request()); + } + case messages::FileSourceMessage::kRemoveFileSourceRequest: { + return HandleRemoveFileSourceRequest(file_source.remove_file_source_request()); + } + default: + LOG(ERROR) << "Unknown message type: " << file_source.msg_case() << " skipping"; + } + return Status::OK(); +} + +std::string FileSourceManager::DebugString() const { + std::lock_guard lock(mu_); + std::stringstream ss; + auto now = std::chrono::steady_clock::now(); + ss << absl::Substitute("File Source Manager Debug State:\n"); + ss << absl::Substitute("ID\tNAME\tCURRENT_STATE\tEXPECTED_STATE\tlast_updated\n"); + for (const auto& [id, file_source] : file_sources_) { + ss << absl::Substitute( + "$0\t$1\t$2\t$3\t$4 seconds\n", id.str(), file_source.name, + statuspb::LifeCycleState_Name(file_source.current_state), + statuspb::LifeCycleState_Name(file_source.expected_state), + std::chrono::duration_cast(now - file_source.last_updated_at) + .count()); + } + return ss.str(); +} + +Status FileSourceManager::HandleRegisterFileSourceRequest( + const messages::RegisterFileSourceRequest& req) { + auto glob_pattern = req.file_source_deployment().glob_pattern(); + PX_ASSIGN_OR_RETURN(auto id, ParseUUID(req.id())); + LOG(INFO) << "Registering file source: " << glob_pattern << " uuid string=" << id.str(); + + FileSourceInfo info; + info.name = glob_pattern; + info.id = id; + info.expected_state = statuspb::RUNNING_STATE; + info.current_state = statuspb::PENDING_STATE; + info.last_updated_at = dispatcher_->GetTimeSource().MonotonicTime(); + stirling_->RegisterFileSource(id, glob_pattern); + { + std::lock_guard lock(mu_); + 
file_sources_[id] = std::move(info); + } + return Status::OK(); +} + +Status FileSourceManager::HandleRemoveFileSourceRequest( + const messages::RemoveFileSourceRequest& req) { + PX_ASSIGN_OR_RETURN(auto id, ParseUUID(req.id())); + std::lock_guard lock(mu_); + auto it = file_sources_.find(id); + if (it == file_sources_.end()) { + return error::NotFound("File source with ID: $0, not found", id.str()); + } + + it->second.expected_state = statuspb::TERMINATED_STATE; + return stirling_->RemoveFileSource(id); +} + +void FileSourceManager::Monitor() { + std::lock_guard lock(mu_); + + for (auto& [id, file_source] : file_sources_) { + auto s_or_publish = stirling_->GetFileSourceInfo(id); + statuspb::LifeCycleState current_state; + // Get the latest current state according to stirling. + if (s_or_publish.ok()) { + current_state = statuspb::RUNNING_STATE; + } else { + switch (s_or_publish.code()) { + case statuspb::FAILED_PRECONDITION: + // Means the binary has not been found. + current_state = statuspb::FAILED_STATE; + break; + case statuspb::RESOURCE_UNAVAILABLE: + current_state = statuspb::PENDING_STATE; + break; + case statuspb::NOT_FOUND: + // Means we didn't actually find the probe. If we requested termination, + // it's because the probe has been removed. + current_state = (file_source.expected_state == statuspb::TERMINATED_STATE) + ? statuspb::TERMINATED_STATE + : statuspb::UNKNOWN_STATE; + break; + default: + current_state = statuspb::FAILED_STATE; + break; + } + } + + if (current_state != statuspb::RUNNING_STATE && + file_source.expected_state == statuspb::TERMINATED_STATE) { + current_state = statuspb::TERMINATED_STATE; + } + + if (current_state == file_source.current_state) { + // No state transition, nothing to do. + continue; + } + + // The following transitions are legal: + // 1. Pending -> Terminated: Probe is stopped before starting. + // 2. Pending -> Running : Probe starts up. + // 3. Running -> Terminated: Probe is stopped. + // 4. 
Running -> Failed: Probe got dettached because binary died. + // 5. Failed -> Running: Probe started up because binary came back to life. + // + // In all cases we basically inform the MDS. + // In the cases where we transition to running, we need to update the schemas. + + Status probe_status = Status::OK(); + LOG(INFO) << absl::Substitute("File source[$0]::$1 has transitioned $2 -> $3", id.str(), + file_source.name, + statuspb::LifeCycleState_Name(file_source.current_state), + statuspb::LifeCycleState_Name(current_state)); + // Check if running now, then update the schema. + if (current_state == statuspb::RUNNING_STATE) { + // We must have just transitioned into running. We try to apply the new schema. + // If it fails we will trigger an error and report that to MDS. + auto publish_pb = s_or_publish.ConsumeValueOrDie(); + auto s = UpdateSchema(publish_pb); + if (!s.ok()) { + current_state = statuspb::FAILED_STATE; + probe_status = s; + } + } else { + probe_status = s_or_publish.status(); + } + + file_source.current_state = current_state; + + // Update MDS with the latest status. 
+ px::vizier::messages::VizierMessage msg; + auto file_source_msg = msg.mutable_file_source_message(); + auto update_msg = file_source_msg->mutable_file_source_info_update(); + ToProto(agent_info()->agent_id, update_msg->mutable_agent_id()); + ToProto(id, update_msg->mutable_id()); + update_msg->set_state(file_source.current_state); + probe_status.ToProto(update_msg->mutable_status()); + VLOG(1) << "Sending file source info update message: " << msg.DebugString(); + auto s = nats_conn_->Publish(msg); + if (!s.ok()) { + LOG(ERROR) << "Failed to update nats"; + } + } + file_source_monitor_timer_->EnableTimer(kUpdateInterval); +} + +Status FileSourceManager::UpdateSchema(const stirling::stirlingpb::Publish& publish_pb) { + LOG(INFO) << "Updating schema for file source"; + auto relation_info_vec = ConvertPublishPBToRelationInfo(publish_pb); + // TODO(zasgar): Failure here can lead to an inconsistent schema state. We should + // figure out how to handle this as part of the data model refactor project. + for (const auto& relation_info : relation_info_vec) { + if (!relation_info_manager_->HasRelation(relation_info.name)) { + table_store_->AddTable( + table_store::HotColdTable::Create(relation_info.name, relation_info.relation), + relation_info.name, relation_info.id); + PX_RETURN_IF_ERROR(relation_info_manager_->AddRelationInfo(relation_info)); + } else { + if (relation_info.relation != table_store_->GetTable(relation_info.name)->GetRelation()) { + return error::Internal( + "File source is not compatible with the schema of the specified output table. 
" + "[table_name=$0]", + relation_info.name); + } + PX_RETURN_IF_ERROR(table_store_->AddTableAlias(relation_info.id, relation_info.name)); + } + } + return Status::OK(); +} + +} // namespace agent +} // namespace vizier +} // namespace px diff --git a/src/vizier/services/agent/pem/file_source_manager.h b/src/vizier/services/agent/pem/file_source_manager.h new file mode 100644 index 00000000000..f45d346f5f2 --- /dev/null +++ b/src/vizier/services/agent/pem/file_source_manager.h @@ -0,0 +1,73 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include + +#include + +#include "src/stirling/stirling.h" +#include "src/vizier/services/agent/shared/manager/manager.h" + +namespace px { +namespace vizier { +namespace agent { + +struct FileSourceInfo { + std::string name; + sole::uuid id; + statuspb::LifeCycleState expected_state; + statuspb::LifeCycleState current_state; + std::chrono::time_point last_updated_at; +}; + +class FileSourceManager : public Manager::MessageHandler { + public: + FileSourceManager() = delete; + FileSourceManager(px::event::Dispatcher* dispatcher, Info* agent_info, + Manager::VizierNATSConnector* nats_conn, stirling::Stirling* stirling, + table_store::TableStore* table_store, + RelationInfoManager* relation_info_manager); + + Status HandleMessage(std::unique_ptr msg) override; + std::string DebugString() const; + Status HandleRegisterFileSourceRequest(const messages::RegisterFileSourceRequest& req); + Status HandleRemoveFileSourceRequest(const messages::RemoveFileSourceRequest& req); + + private: + // The tracepoint Monitor that is responsible for watching and updating the state of + // active tracepoints. 
+ void Monitor(); + Status UpdateSchema(const stirling::stirlingpb::Publish& publish_proto); + + px::event::Dispatcher* dispatcher_; + Manager::VizierNATSConnector* nats_conn_; + stirling::Stirling* stirling_; + table_store::TableStore* table_store_; + RelationInfoManager* relation_info_manager_; + + event::TimerUPtr file_source_monitor_timer_; + mutable std::mutex mu_; + absl::flat_hash_map file_sources_; +}; + +} // namespace agent +} // namespace vizier +} // namespace px diff --git a/src/vizier/services/agent/pem/pem_manager.cc b/src/vizier/services/agent/pem/pem_manager.cc index ff9f1e0ffad..c73444b9b6c 100644 --- a/src/vizier/services/agent/pem/pem_manager.cc +++ b/src/vizier/services/agent/pem/pem_manager.cc @@ -78,6 +78,11 @@ Status PEMManager::PostRegisterHookImpl() { stirling_.get(), table_store(), relation_info_manager()); PX_RETURN_IF_ERROR(RegisterMessageHandler(messages::VizierMessage::MsgCase::kTracepointMessage, tracepoint_manager_)); + file_source_manager_ = + std::make_shared(dispatcher(), info(), agent_nats_connector(), + stirling_.get(), table_store(), relation_info_manager()); + PX_RETURN_IF_ERROR(RegisterMessageHandler(messages::VizierMessage::MsgCase::kFileSourceMessage, + file_source_manager_)); return Status::OK(); } @@ -145,20 +150,20 @@ Status PEMManager::InitSchemas() { // Special case to set the max size of the http_events table differently from the other // tables. For now, the min cold batch size is set to 256kB to be consistent with previous // behaviour. 
- table_ptr = std::make_shared(relation_info.name, relation_info.relation, - http_table_size, 256 * 1024); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, http_table_size, 256 * 1024); } else if (relation_info.name == "stirling_error") { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - stirling_error_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, stirling_error_table_size); } else if (relation_info.name == "probe_status") { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - probe_status_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, probe_status_table_size); } else if (relation_info.name == "proc_exit_events") { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - proc_exit_events_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, proc_exit_events_table_size); } else { - table_ptr = std::make_shared(relation_info.name, relation_info.relation, - other_table_size); + table_ptr = std::make_shared( + relation_info.name, relation_info.relation, other_table_size); } table_store()->AddTable(std::move(table_ptr), relation_info.name, relation_info.id); diff --git a/src/vizier/services/agent/pem/pem_manager.h b/src/vizier/services/agent/pem/pem_manager.h index 9dcbab9b4f9..d9c138355d9 100644 --- a/src/vizier/services/agent/pem/pem_manager.h +++ b/src/vizier/services/agent/pem/pem_manager.h @@ -28,6 +28,7 @@ #include "src/common/system/kernel_version.h" #include "src/stirling/stirling.h" +#include "src/vizier/services/agent/pem/file_source_manager.h" #include "src/vizier/services/agent/pem/tracepoint_manager.h" #include "src/vizier/services/agent/shared/manager/manager.h" @@ -104,6 +105,7 @@ class PEMManager : public Manager { std::unique_ptr stirling_; std::shared_ptr tracepoint_manager_; + std::shared_ptr file_source_manager_; // Timer for 
triggering ClockConverter polls. px::event::TimerUPtr clock_converter_timer_; diff --git a/src/vizier/services/agent/pem/tracepoint_manager.cc b/src/vizier/services/agent/pem/tracepoint_manager.cc index 3c7453c0313..65a18370bd7 100644 --- a/src/vizier/services/agent/pem/tracepoint_manager.cc +++ b/src/vizier/services/agent/pem/tracepoint_manager.cc @@ -204,6 +204,7 @@ void TracepointManager::Monitor() { ToProto(id, update_msg->mutable_id()); update_msg->set_state(tracepoint.current_state); probe_status.ToProto(update_msg->mutable_status()); + VLOG(1) << "Sending tracepoint info update message: " << msg.DebugString(); auto s = nats_conn_->Publish(msg); if (!s.ok()) { LOG(ERROR) << "Failed to update nats"; @@ -219,8 +220,9 @@ Status TracepointManager::UpdateSchema(const stirling::stirlingpb::Publish& publ // figure out how to handle this as part of the data model refactor project. for (const auto& relation_info : relation_info_vec) { if (!relation_info_manager_->HasRelation(relation_info.name)) { - table_store_->AddTable(table_store::Table::Create(relation_info.name, relation_info.relation), - relation_info.name, relation_info.id); + table_store_->AddTable( + table_store::HotColdTable::Create(relation_info.name, relation_info.relation), + relation_info.name, relation_info.id); PX_RETURN_IF_ERROR(relation_info_manager_->AddRelationInfo(relation_info)); } else { if (relation_info.relation != table_store_->GetTable(relation_info.name)->GetRelation()) { diff --git a/src/vizier/services/agent/shared/manager/BUILD.bazel b/src/vizier/services/agent/shared/manager/BUILD.bazel index 7ba7ff6b8cc..2bf527935d4 100644 --- a/src/vizier/services/agent/shared/manager/BUILD.bazel +++ b/src/vizier/services/agent/shared/manager/BUILD.bazel @@ -42,6 +42,7 @@ pl_cc_library( "//src/vizier/funcs:cc_library", "//src/vizier/messages/messagespb:messages_pl_cc_proto", "//src/vizier/services/agent/shared/base:cc_library", + "//src/stirling/source_connectors/stirling_error:cc_library", 
"//third_party:natsc", "@com_github_arun11299_cpp_jwt//:cpp_jwt", "@com_github_cameron314_concurrentqueue//:concurrentqueue", diff --git a/src/vizier/services/agent/shared/manager/heartbeat.cc b/src/vizier/services/agent/shared/manager/heartbeat.cc index 4b48c5c68a6..0f0e77aeef5 100644 --- a/src/vizier/services/agent/shared/manager/heartbeat.cc +++ b/src/vizier/services/agent/shared/manager/heartbeat.cc @@ -100,7 +100,8 @@ Status HeartbeatMessageHandler::SendHeartbeatInternal() { auto* update_info = hb->mutable_update_info(); ConsumeAgentPIDUpdates(update_info); - if (agent_info()->capabilities.collects_data() && + auto capabilities = agent_info()->capabilities; + if ((capabilities.collects_data() || capabilities.stores_data()) && (!sent_schema_ || relation_info_manager_->has_updates())) { sent_schema_ = true; relation_info_manager_->AddSchemaToUpdateInfo(update_info); diff --git a/src/vizier/services/agent/shared/manager/heartbeat.h b/src/vizier/services/agent/shared/manager/heartbeat.h index ea7a88dd352..50361997854 100644 --- a/src/vizier/services/agent/shared/manager/heartbeat.h +++ b/src/vizier/services/agent/shared/manager/heartbeat.h @@ -21,12 +21,19 @@ #include #include "src/vizier/services/agent/shared/manager/manager.h" +#include "src/table_store/table_store.h" +#include "src/shared/schema/utils.h" +#include "src/stirling/source_connectors/stirling_error/sink_results_table.h" +#include "src/stirling/core/pub_sub_manager.h" namespace px { namespace vizier { namespace agent { class HeartbeatMessageHandler : public Manager::MessageHandler { + + const std::string kSinkResultsTableName = "sink_results"; + public: HeartbeatMessageHandler() = delete; HeartbeatMessageHandler(px::event::Dispatcher* dispatcher, @@ -40,6 +47,20 @@ class HeartbeatMessageHandler : public Manager::MessageHandler { void DisableHeartbeats(); void EnableHeartbeats(); + Status CreateSinkResultsTable(table_store::TableStore* table_store) { + auto mgr = 
std::make_unique(stirling::kSinkResultsTable); + std::vector> mgrs; + mgrs.push_back(std::move(mgr)); + stirling::stirlingpb::Publish publish_pb; + PopulatePublishProto(&publish_pb, mgrs); + auto relation_info_vec = ConvertPublishPBToRelationInfo(publish_pb); + auto relation_info = relation_info_vec[0]; + auto table = table_store::HotColdTable::Create(relation_info.name, relation_info.relation); + table_store->AddTable(std::move(table), relation_info.name, relation_info.id); + PX_RETURN_IF_ERROR(relation_info_manager_->AddRelationInfo(relation_info)); + return Status::OK(); + } + private: void ConsumeAgentPIDUpdates(messages::AgentUpdateInfo* update_info); void ProcessPIDStartedEvent(const px::md::PIDStartedEvent& ev, diff --git a/src/vizier/services/agent/shared/manager/heartbeat_test.cc b/src/vizier/services/agent/shared/manager/heartbeat_test.cc index 249a34ea1fc..666c3223f15 100644 --- a/src/vizier/services/agent/shared/manager/heartbeat_test.cc +++ b/src/vizier/services/agent/shared/manager/heartbeat_test.cc @@ -114,10 +114,10 @@ class HeartbeatMessageHandlerTest : public ::testing::Test { // Relation info with no tabletization. Relation relation0({types::TIME64NS, types::INT64}, {"time_", "count"}); - RelationInfo relation_info0("relation0", /* id */ 0, "desc0", relation0); + RelationInfo relation_info0("relation0", /* id */ 0, "desc0", std::nullopt, relation0); // Relation info with no tabletization. Relation relation1({types::TIME64NS, types::FLOAT64}, {"time_", "gauge"}); - RelationInfo relation_info1("relation1", /* id */ 1, "desc1", relation1); + RelationInfo relation_info1("relation1", /* id */ 1, "desc1", std::nullopt, relation1); std::vector relation_info_vec({relation_info0, relation_info1}); // Pass relation info to the manager. 
relation_info_manager_ = std::make_unique(); @@ -299,7 +299,7 @@ TEST_F(HeartbeatMessageHandlerTest, HandleHeartbeatRelationUpdates) { auto s = heartbeat_handler_->HandleMessage(std::move(hb_ack)); Relation relation2({types::TIME64NS, types::FLOAT64}, {"time_", "gauge"}); - RelationInfo relation_info2("relation2", /* id */ 1, "desc2", relation2); + RelationInfo relation_info2("relation2", /* id */ 1, "desc2", std::nullopt, relation2); s = relation_info_manager_->AddRelationInfo(relation_info2); time_system_->Sleep(std::chrono::milliseconds(5 * 5000 + 1)); diff --git a/src/vizier/services/agent/shared/manager/manager.cc b/src/vizier/services/agent/shared/manager/manager.cc index 004eb5ba2ea..01efe60044b 100644 --- a/src/vizier/services/agent/shared/manager/manager.cc +++ b/src/vizier/services/agent/shared/manager/manager.cc @@ -87,6 +87,13 @@ Manager::MDTPServiceSPtr CreateMDTPStub(const std::shared_ptr& ch return std::make_shared(chan); } +Manager::MDFSServiceSPtr CreateMDFSStub(const std::shared_ptr& chan) { + if (chan == nullptr) { + return nullptr; + } + return std::make_shared(chan); +} + std::shared_ptr CreateCronScriptStub( const std::shared_ptr& chan) { if (chan == nullptr) { @@ -108,7 +115,7 @@ Manager::Manager(sole::uuid agent_id, std::string_view pod_name, std::string_vie relation_info_manager_(std::make_unique()), mds_channel_(grpc::CreateChannel(std::string(mds_url), grpc_channel_creds_)), func_context_(this, CreateMDSStub(mds_channel_), CreateMDTPStub(mds_channel_), - CreateCronScriptStub(mds_channel_), table_store_, + CreateMDFSStub(mds_channel_), CreateCronScriptStub(mds_channel_), table_store_, [](grpc::ClientContext* ctx) { AddServiceTokenToClientContext(ctx); }), memory_metrics_(&GetMetricsRegistry(), "agent_id", agent_id.str()) { // Register Vizier specific and carnot builtin functions. 
@@ -229,6 +236,10 @@ Status Manager::RegisterBackgroundHelpers() { heartbeat_handler_ = std::make_shared( dispatcher_.get(), mds_manager_.get(), relation_info_manager_.get(), &info_, agent_nats_connector_.get()); + if (info_.capabilities.stores_data()) { + LOG(INFO) << "Creating results table"; + PX_RETURN_IF_ERROR(heartbeat_handler_->CreateSinkResultsTable(table_store())); + } auto heartbeat_nack_handler = std::make_shared( dispatcher_.get(), &info_, agent_nats_connector_.get(), @@ -288,8 +299,11 @@ Status Manager::PostRegisterHook(uint32_t asid) { LOG_IF(FATAL, info_.asid != 0) << "Attempted to register existing agent with new ASID"; info_.asid = asid; + const std::string proc_pid_path = std::string("/proc/") + std::to_string(info_.pid); + PX_ASSIGN_OR_RETURN(auto start_time, system::GetPIDStartTimeTicks(proc_pid_path)); + mds_manager_ = std::make_unique( - info_.hostname, info_.asid, info_.pid, info_.pod_name, info_.agent_id, + info_.hostname, info_.asid, info_.pid, start_time, info_.pod_name, info_.agent_id, info_.capabilities.collects_data(), px::system::Config::GetInstance(), agent_metadata_filter_.get(), sole::rebuild(FLAGS_vizier_id), FLAGS_vizier_name, FLAGS_vizier_namespace, time_system_.get()); diff --git a/src/vizier/services/agent/shared/manager/manager.h b/src/vizier/services/agent/shared/manager/manager.h index 3d7a8a4f49e..af2cd912a5a 100644 --- a/src/vizier/services/agent/shared/manager/manager.h +++ b/src/vizier/services/agent/shared/manager/manager.h @@ -92,6 +92,8 @@ class Manager : public BaseManager { using MDSServiceSPtr = std::shared_ptr; using MDTPService = services::metadata::MetadataTracepointService; using MDTPServiceSPtr = std::shared_ptr; + using MDFSService = services::metadata::MetadataFileSourceService; + using MDFSServiceSPtr = std::shared_ptr; using ResultSinkStub = px::carnotpb::ResultSinkService::StubInterface; Manager() = delete; diff --git a/src/vizier/services/agent/shared/manager/relation_info_manager.cc 
b/src/vizier/services/agent/shared/manager/relation_info_manager.cc index cb3fc51ea8b..d227978224c 100644 --- a/src/vizier/services/agent/shared/manager/relation_info_manager.cc +++ b/src/vizier/services/agent/shared/manager/relation_info_manager.cc @@ -54,6 +54,9 @@ void RelationInfoManager::AddSchemaToUpdateInfo(messages::AgentUpdateInfo* updat schema->set_tabletized(relation_info.tabletized); schema->set_tabletization_key(relation.GetColumnName(relation_info.tabletization_key_idx)); } + if (relation_info.mutation_id.has_value()) { + schema->set_mutation_id(relation_info.mutation_id.value()); + } for (size_t i = 0; i < relation.NumColumns(); ++i) { auto* column = schema->add_columns(); column->set_name(relation.GetColumnName(i)); diff --git a/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc b/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc index 7f9a06c750c..abeb919847c 100644 --- a/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc +++ b/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc @@ -75,11 +75,11 @@ schema { TEST_F(RelationInfoManagerTest, test_update) { // Relation info with no tabletization. Relation relation0({types::TIME64NS, types::INT64}, {"time_", "count"}); - RelationInfo relation_info0("relation0", /* id */ 0, "desc0", relation0); + RelationInfo relation_info0("relation0", /* id */ 0, "desc0", std::nullopt, relation0); // Relation info with no tabletization. 
Relation relation1({types::TIME64NS, types::FLOAT64}, {"time_", "gauge"}); - RelationInfo relation_info1("relation1", /* id */ 1, "desc1", relation1); + RelationInfo relation_info1("relation1", /* id */ 1, "desc1", std::nullopt, relation1); EXPECT_OK(relation_info_manager_->AddRelationInfo(std::move(relation_info0))); EXPECT_OK(relation_info_manager_->AddRelationInfo(std::move(relation_info1))); @@ -131,12 +131,12 @@ schema { TEST_F(RelationInfoManagerTest, test_tabletization_keys) { // Relation info with no tabletization. Relation relation0({types::TIME64NS, types::INT64}, {"time_", "count"}); - RelationInfo relation_info0("relation0", /* id */ 0, "desc0", relation0); + RelationInfo relation_info0("relation0", /* id */ 0, "desc0", std::nullopt, relation0); // Relation info with a tablet key ("upid"). Relation relation1({types::TIME64NS, types::UINT128, types::INT64}, {"time_", "upid", "count"}); RelationInfo relation_info1("relation1", /* id */ 1, "desc1", /* tabletization_key_idx */ 1, - relation1); + std::nullopt, relation1); EXPECT_FALSE(relation_info_manager_->has_updates()); diff --git a/src/vizier/services/metadata/BUILD.bazel b/src/vizier/services/metadata/BUILD.bazel index 9d52501dcd2..f885bd1c777 100644 --- a/src/vizier/services/metadata/BUILD.bazel +++ b/src/vizier/services/metadata/BUILD.bazel @@ -33,6 +33,7 @@ go_library( "//src/vizier/services/metadata/controllers", "//src/vizier/services/metadata/controllers/agent", "//src/vizier/services/metadata/controllers/cronscript", + "//src/vizier/services/metadata/controllers/file_source", "//src/vizier/services/metadata/controllers/k8smeta", "//src/vizier/services/metadata/controllers/tracepoint", "//src/vizier/services/metadata/metadataenv", diff --git a/src/vizier/services/metadata/controllers/BUILD.bazel b/src/vizier/services/metadata/controllers/BUILD.bazel index 0fd8cc0fee5..ca6fe64f35a 100644 --- a/src/vizier/services/metadata/controllers/BUILD.bazel +++ 
b/src/vizier/services/metadata/controllers/BUILD.bazel @@ -35,6 +35,7 @@ go_library( "//src/utils", "//src/vizier/messages/messagespb:messages_pl_go_proto", "//src/vizier/services/metadata/controllers/agent", + "//src/vizier/services/metadata/controllers/file_source", "//src/vizier/services/metadata/controllers/k8smeta", "//src/vizier/services/metadata/controllers/tracepoint", "//src/vizier/services/metadata/metadataenv", @@ -78,6 +79,8 @@ pl_go_test( "//src/vizier/messages/messagespb:messages_pl_go_proto", "//src/vizier/services/metadata/controllers/agent", "//src/vizier/services/metadata/controllers/agent/mock", + "//src/vizier/services/metadata/controllers/file_source", + "//src/vizier/services/metadata/controllers/file_source/mock", "//src/vizier/services/metadata/controllers/testutils", "//src/vizier/services/metadata/controllers/tracepoint", "//src/vizier/services/metadata/controllers/tracepoint/mock", diff --git a/src/vizier/services/metadata/controllers/agent_topic_listener.go b/src/vizier/services/metadata/controllers/agent_topic_listener.go index e8b72cfa463..13743e6ba6f 100644 --- a/src/vizier/services/metadata/controllers/agent_topic_listener.go +++ b/src/vizier/services/metadata/controllers/agent_topic_listener.go @@ -32,6 +32,7 @@ import ( "px.dev/pixie/src/utils" "px.dev/pixie/src/vizier/messages/messagespb" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" "px.dev/pixie/src/vizier/services/shared/agentpb" "px.dev/pixie/src/vizier/utils/messagebus" @@ -80,6 +81,7 @@ func (c *concurrentAgentMap) delete(agentID uuid.UUID) { type AgentTopicListener struct { agtMgr agent.Manager tpMgr *tracepoint.Manager + fsMgr *file_source.Manager sendMessage SendMessageFn // Map from agent ID -> the agentHandler that's responsible for handling that particular @@ -92,6 +94,7 @@ type AgentHandler struct { id uuid.UUID 
agtMgr agent.Manager tpMgr *tracepoint.Manager + fsMgr *file_source.Manager atl *AgentTopicListener MsgChannel chan *nats.Msg @@ -103,11 +106,12 @@ type AgentHandler struct { // NewAgentTopicListener creates a new agent topic listener. func NewAgentTopicListener(agtMgr agent.Manager, tpMgr *tracepoint.Manager, - sendMsgFn SendMessageFn, -) (*AgentTopicListener, error) { + fsMgr *file_source.Manager, + sendMsgFn SendMessageFn) (*AgentTopicListener, error) { atl := &AgentTopicListener{ agtMgr: agtMgr, tpMgr: tpMgr, + fsMgr: fsMgr, sendMessage: sendMsgFn, agentMap: &concurrentAgentMap{unsafeMap: make(map[uuid.UUID]*AgentHandler)}, } @@ -162,6 +166,8 @@ func (a *AgentTopicListener) HandleMessage(msg *nats.Msg) error { a.forwardAgentRegisterRequest(m.RegisterAgentRequest, msg) case *messagespb.VizierMessage_TracepointMessage: a.onAgentTracepointMessage(m.TracepointMessage) + case *messagespb.VizierMessage_FileSourceMessage: + a.onAgentFileSourceMessage(m.FileSourceMessage) default: log.WithField("message-type", reflect.TypeOf(pb.Msg).String()). Error("Unhandled message.") @@ -191,6 +197,7 @@ func (a *AgentTopicListener) createAgentHandler(agentID uuid.UUID) *AgentHandler id: agentID, agtMgr: a.agtMgr, tpMgr: a.tpMgr, + fsMgr: a.fsMgr, atl: a, MsgChannel: make(chan *nats.Msg, 10), quitCh: make(chan struct{}), @@ -292,6 +299,23 @@ func (a *AgentTopicListener) onAgentTracepointInfoUpdate(m *messagespb.Tracepoin } } +func (a *AgentTopicListener) onAgentFileSourceMessage(pbMessage *messagespb.FileSourceMessage) { + switch m := pbMessage.Msg.(type) { + case *messagespb.FileSourceMessage_FileSourceInfoUpdate: + a.onAgentFileSourceInfoUpdate(m.FileSourceInfoUpdate) + default: + log.WithField("message-type", reflect.TypeOf(pbMessage.Msg).String()). 
+ Error("Unhandled message.") + } +} + +func (a *AgentTopicListener) onAgentFileSourceInfoUpdate(m *messagespb.FileSourceInfoUpdate) { + err := a.fsMgr.UpdateAgentFileSourceStatus(m.ID, m.AgentID, m.State, m.Status) + if err != nil { + log.WithError(err).Error("Could not update agent tracepoint status") + } +} + // Stop stops processing any agent messagespb. func (a *AgentTopicListener) Stop() { // Grab all the handlers in one go since calling stop will modify the map and need @@ -433,6 +457,22 @@ func (ah *AgentHandler) onAgentRegisterRequest(m *messagespb.RegisterAgentReques } } } + + // Register all file sources on new agent. + fileSources, err := ah.fsMgr.GetAllFileSources() + if err != nil { + log.WithError(err).Error("Could not get all file sources") + return + } + + for _, fs := range fileSources { + if fs.ExpectedState != statuspb.TERMINATED_STATE { + err = ah.fsMgr.RegisterFileSource(agent, utils.UUIDFromProtoOrNil(fs.ID), fs.FileSource) + if err != nil { + log.WithError(err).Error("Failed to send RegisterFileSource request") + } + } + } }() } diff --git a/src/vizier/services/metadata/controllers/agent_topic_listener_test.go b/src/vizier/services/metadata/controllers/agent_topic_listener_test.go index c71ac335204..ad6f8369039 100644 --- a/src/vizier/services/metadata/controllers/agent_topic_listener_test.go +++ b/src/vizier/services/metadata/controllers/agent_topic_listener_test.go @@ -38,6 +38,8 @@ import ( "px.dev/pixie/src/vizier/services/metadata/controllers" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" mock_agent "px.dev/pixie/src/vizier/services/metadata/controllers/agent/mock" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" + mock_file_source "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock" "px.dev/pixie/src/vizier/services/metadata/controllers/testutils" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" mock_tracepoint 
"px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint/mock" @@ -64,11 +66,12 @@ func assertSendMessageCalledWith(t *testing.T, expTopic string, expMsg messagesp } } -func setup(t *testing.T, sendMsgFn controllers.SendMessageFn) (*controllers.AgentTopicListener, *mock_agent.MockManager, *mock_tracepoint.MockStore, func()) { +func setup(t *testing.T, sendMsgFn controllers.SendMessageFn) (*controllers.AgentTopicListener, *mock_agent.MockManager, *mock_tracepoint.MockStore, *mock_file_source.MockStore, func()) { ctrl := gomock.NewController(t) mockAgtMgr := mock_agent.NewMockManager(ctrl) mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) agentInfo := new(agentpb.Agent) if err := proto.UnmarshalText(testutils.UnhealthyKelvinAgentInfo, agentInfo); err != nil { @@ -82,14 +85,16 @@ func setup(t *testing.T, sendMsgFn controllers.SendMessageFn) (*controllers.Agen Return([]*agentpb.Agent{agentInfo}, nil) tracepointMgr := tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) - atl, _ := controllers.NewAgentTopicListener(mockAgtMgr, tracepointMgr, sendMsgFn) + fsMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + atl, _ := controllers.NewAgentTopicListener(mockAgtMgr, tracepointMgr, fsMgr, sendMsgFn) cleanup := func() { ctrl.Finish() tracepointMgr.Close() + fsMgr.Close() } - return atl, mockAgtMgr, mockTracepointStore, cleanup + return atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup } func TestAgentRegisterRequest(t *testing.T) { @@ -109,8 +114,8 @@ func TestAgentRegisterRequest(t *testing.T) { // Set up mock. 
var wg sync.WaitGroup - wg.Add(1) - atl, mockAgtMgr, mockTracepointStore, cleanup := setup(t, sendMsg) + wg.Add(2) + atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup := setup(t, sendMsg) defer cleanup() agentInfo := &agentpb.Agent{ @@ -139,6 +144,14 @@ func TestAgentRegisterRequest(t *testing.T) { return nil, nil }) + mockFileSourceStore. + EXPECT(). + GetFileSources(). + DoAndReturn(func() ([]*storepb.FileSourceInfo, error) { + wg.Done() + return nil, nil + }) + req := new(messagespb.VizierMessage) if err := proto.UnmarshalText(testutils.RegisterAgentRequestPB, req); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -187,8 +200,8 @@ func TestKelvinRegisterRequest(t *testing.T) { // Set up mock. var wg sync.WaitGroup - wg.Add(1) - atl, mockAgtMgr, mockTracepointStore, cleanup := setup(t, sendMsg) + wg.Add(2) + atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup := setup(t, sendMsg) defer cleanup() agentInfo := &agentpb.Agent{ @@ -217,6 +230,14 @@ func TestKelvinRegisterRequest(t *testing.T) { return nil, nil }) + mockFileSourceStore. + EXPECT(). + GetFileSources(). + DoAndReturn(func() ([]*storepb.FileSourceInfo, error) { + wg.Done() + return nil, nil + }) + req := new(messagespb.VizierMessage) if err := proto.UnmarshalText(testutils.RegisterKelvinRequestPB, req); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -262,8 +283,8 @@ func TestAgentReRegisterRequest(t *testing.T) { // Set up mock. var wg sync.WaitGroup - wg.Add(1) - atl, mockAgtMgr, mockTracepointStore, cleanup := setup(t, sendMsg) + wg.Add(2) + atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup := setup(t, sendMsg) defer cleanup() agentInfo := &agentpb.Agent{ @@ -293,6 +314,14 @@ func TestAgentReRegisterRequest(t *testing.T) { return nil, nil }) + mockFileSourceStore. + EXPECT(). + GetFileSources(). 
+ DoAndReturn(func() ([]*storepb.FileSourceInfo, error) { + wg.Done() + return nil, nil + }) + req := new(messagespb.VizierMessage) if err := proto.UnmarshalText(testutils.ReregisterPurgedAgentRequestPB, req); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -326,7 +355,7 @@ func TestAgentReRegisterRequest(t *testing.T) { func TestAgentRegisterRequestInvalidUUID(t *testing.T) { // Set up mock. - atl, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) @@ -344,7 +373,7 @@ func TestAgentRegisterRequestInvalidUUID(t *testing.T) { func TestAgentCreateFailed(t *testing.T) { var wg sync.WaitGroup - atl, mockAgtMgr, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, mockAgtMgr, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) @@ -398,7 +427,7 @@ func TestAgentHeartbeat(t *testing.T) { // Set up mock. var wg sync.WaitGroup - atl, mockAgtMgr, _, cleanup := setup(t, func(topic string, b []byte) error { + atl, mockAgtMgr, _, _, cleanup := setup(t, func(topic string, b []byte) error { msg := messagespb.VizierMessage{} if err := proto.Unmarshal(b, &msg); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -474,7 +503,7 @@ func TestAgentHeartbeat_Failed(t *testing.T) { require.NoError(t, err) // Set up mock. - atl, mockAgtMgr, _, cleanup := setup(t, sendMsg) + atl, mockAgtMgr, _, _, cleanup := setup(t, sendMsg) defer cleanup() var wg sync.WaitGroup @@ -498,7 +527,7 @@ func TestAgentHeartbeat_Failed(t *testing.T) { func TestEmptyMessage(t *testing.T) { // Set up mock. 
- atl, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) reqPb, err := req.Marshal() @@ -512,7 +541,7 @@ func TestEmptyMessage(t *testing.T) { func TestUnhandledMessage(t *testing.T) { // Set up mock. - atl, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) @@ -530,7 +559,7 @@ func TestUnhandledMessage(t *testing.T) { func TestAgentTracepointInfoUpdate(t *testing.T) { // Set up mock. - atl, _, mockTracepointStore, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, mockTracepointStore, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() agentID := uuid.Must(uuid.NewV4()) @@ -567,6 +596,45 @@ func TestAgentTracepointInfoUpdate(t *testing.T) { require.NoError(t, err) } +func TestAgentFileSourceInfoUpdate(t *testing.T) { + // Set up mock. + atl, _, _, mockFileSourceStore, cleanup := setup(t, assertSendMessageUncalled(t)) + defer cleanup() + + agentID := uuid.Must(uuid.NewV4()) + tpID := uuid.Must(uuid.NewV4()) + + mockFileSourceStore. + EXPECT(). + UpdateFileSourceState(&storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(tpID), + AgentID: utils.ProtoFromUUID(agentID), + State: statuspb.RUNNING_STATE, + }). 
+ Return(nil) + + req := &messagespb.VizierMessage{ + Msg: &messagespb.VizierMessage_FileSourceMessage{ + FileSourceMessage: &messagespb.FileSourceMessage{ + Msg: &messagespb.FileSourceMessage_FileSourceInfoUpdate{ + FileSourceInfoUpdate: &messagespb.FileSourceInfoUpdate{ + ID: utils.ProtoFromUUID(tpID), + AgentID: utils.ProtoFromUUID(agentID), + State: statuspb.RUNNING_STATE, + }, + }, + }, + }, + } + reqPb, err := req.Marshal() + require.NoError(t, err) + + msg := nats.Msg{} + msg.Data = reqPb + err = atl.HandleMessage(&msg) + require.NoError(t, err) +} + func TestAgentStop(t *testing.T) { u, err := uuid.FromString(testutils.NewAgentUUID) require.NoError(t, err) @@ -581,7 +649,7 @@ func TestAgentStop(t *testing.T) { }) // Set up mock. - atl, _, _, cleanup := setup(t, sendMsg) + atl, _, _, _, cleanup := setup(t, sendMsg) defer cleanup() atl.StopAgent(u) diff --git a/src/vizier/services/metadata/controllers/file_source/BUILD.bazel b/src/vizier/services/metadata/controllers/file_source/BUILD.bazel new file mode 100644 index 00000000000..933a76e91a6 --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/BUILD.bazel @@ -0,0 +1,74 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("//bazel:pl_build_system.bzl", "pl_go_test") + +go_library( + name = "file_source", + srcs = [ + "file_source.go", + "file_source_store.go", + ], + importpath = "px.dev/pixie/src/vizier/services/metadata/controllers/file_source", + visibility = ["//src/vizier:__subpackages__"], + deps = [ + "//src/api/proto/uuidpb:uuid_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", + "//src/common/base/statuspb:status_pl_go_proto", + "//src/utils", + "//src/vizier/messages/messagespb:messages_pl_go_proto", + "//src/vizier/services/metadata/storepb:store_pl_go_proto", + "//src/vizier/services/shared/agentpb:agent_pl_go_proto", + "//src/vizier/utils/datastore", + "@com_github_gofrs_uuid//:uuid", + "@com_github_gogo_protobuf//proto", + "@com_github_gogo_protobuf//types", + "@com_github_sirupsen_logrus//:logrus", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//status", + "@org_golang_x_sync//errgroup", + ], +) + +pl_go_test( + name = "file_source_test", + srcs = [ + "file_source_store_test.go", + "file_source_test.go", + ], + embed = [":file_source"], + deps = [ + "//src/api/proto/uuidpb:uuid_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", + "//src/common/base/statuspb:status_pl_go_proto", + "//src/utils", + "//src/vizier/messages/messagespb:messages_pl_go_proto", + "//src/vizier/services/metadata/controllers/agent/mock", + "//src/vizier/services/metadata/controllers/file_source/mock", + "//src/vizier/services/metadata/storepb:store_pl_go_proto", + "//src/vizier/services/shared/agentpb:agent_pl_go_proto", + "//src/vizier/utils/datastore/pebbledb", + "@com_github_cockroachdb_pebble//:pebble", + "@com_github_cockroachdb_pebble//vfs", + "@com_github_gofrs_uuid//:uuid", + "@com_github_gogo_protobuf//proto", + "@com_github_gogo_protobuf//types", + "@com_github_golang_mock//gomock", + 
"@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + ], +) diff --git a/src/vizier/services/metadata/controllers/file_source/file_source.go b/src/vizier/services/metadata/controllers/file_source/file_source.go new file mode 100644 index 00000000000..770476d1632 --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/file_source.go @@ -0,0 +1,375 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package file_source + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/gofrs/uuid" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "px.dev/pixie/src/api/proto/uuidpb" + "px.dev/pixie/src/carnot/planner/file_source/ir" + "px.dev/pixie/src/common/base/statuspb" + "px.dev/pixie/src/utils" + "px.dev/pixie/src/vizier/messages/messagespb" + "px.dev/pixie/src/vizier/services/metadata/storepb" + "px.dev/pixie/src/vizier/services/shared/agentpb" +) + +var ( + // ErrFileSourceAlreadyExists is produced if a file_source already exists with the given name + // and does not have a matching schema. + ErrFileSourceAlreadyExists = errors.New("FileSource already exists") +) + +// agentMessenger is a controller that lets us message all agents and all active agents. 
+type agentMessenger interface { + MessageAgents(agentIDs []uuid.UUID, msg []byte) error + MessageActiveAgents(msg []byte) error +} + +// Store is a datastore which can store, update, and retrieve information about file_sources. +type Store interface { + UpsertFileSource(uuid.UUID, *storepb.FileSourceInfo) error + GetFileSource(uuid.UUID) (*storepb.FileSourceInfo, error) + GetFileSources() ([]*storepb.FileSourceInfo, error) + UpdateFileSourceState(*storepb.AgentFileSourceStatus) error + GetFileSourceStates(uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) + SetFileSourceWithName(string, uuid.UUID) error + GetFileSourcesWithNames([]string) ([]*uuid.UUID, error) + GetFileSourcesForIDs([]uuid.UUID) ([]*storepb.FileSourceInfo, error) + SetFileSourceTTL(uuid.UUID, time.Duration) error + DeleteFileSourceTTLs([]uuid.UUID) error + DeleteFileSource(uuid.UUID) error + DeleteFileSourcesForAgent(uuid.UUID) error + GetFileSourceTTLs() ([]uuid.UUID, []time.Time, error) +} + +// Manager manages the file_sources deployed in the cluster. +type Manager struct { + ts Store + agtMgr agentMessenger + + done chan struct{} + once sync.Once +} + +// NewManager creates a new file_source manager. 
+func NewManager(ts Store, agtMgr agentMessenger, ttlReaperDuration time.Duration) *Manager {
+	tm := &Manager{
+		ts:     ts,
+		agtMgr: agtMgr,
+		done:   make(chan struct{}),
+	}
+
+	go tm.watchForFileSourceExpiry(ttlReaperDuration)
+	return tm
+}
+
+func (m *Manager) watchForFileSourceExpiry(ttlReaperDuration time.Duration) {
+	ticker := time.NewTicker(ttlReaperDuration)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-m.done:
+			return
+		case <-ticker.C:
+			m.terminateExpiredFileSources()
+		}
+	}
+}
+
+func (m *Manager) terminateExpiredFileSources() {
+	fss, err := m.ts.GetFileSources()
+	if err != nil {
+		log.WithError(err).Warn("error encountered when trying to terminate expired file_sources")
+		return
+	}
+
+	ttlKeys, ttlVals, err := m.ts.GetFileSourceTTLs()
+	if err != nil {
+		log.WithError(err).Warn("error encountered when trying to terminate expired file_sources")
+		return
+	}
+
+	now := time.Now()
+
+	// Lookup for file_sources that still have an active ttl
+	fsActive := make(map[uuid.UUID]bool)
+	for i, fs := range ttlKeys {
+		fsActive[fs] = ttlVals[i].After(now)
+	}
+
+	for _, fs := range fss {
+		fsID := utils.UUIDFromProtoOrNil(fs.ID)
+		if fsActive[fsID] {
+			// FileSource TTL exists and is in the future
+			continue
+		}
+		if fs.ExpectedState == statuspb.TERMINATED_STATE {
+			// FileSource is already in terminated state
+			continue
+		}
+		err = m.terminateFileSource(fsID)
+		if err != nil {
+			log.WithError(err).Warn("error encountered when trying to terminate expired file_sources")
+		}
+	}
+}
+
+func (m *Manager) terminateFileSource(id uuid.UUID) error {
+	// Update state in datastore to terminated.
+	fs, err := m.ts.GetFileSource(id)
+	if err != nil {
+		return err
+	}
+
+	if fs == nil {
+		return nil
+	}
+
+	fs.ExpectedState = statuspb.TERMINATED_STATE
+	err = m.ts.UpsertFileSource(id, fs)
+	if err != nil {
+		return err
+	}
+
+	// Send termination messages to PEMs.
+ fileSourceReq := messagespb.VizierMessage{ + Msg: &messagespb.VizierMessage_FileSourceMessage{ + FileSourceMessage: &messagespb.FileSourceMessage{ + Msg: &messagespb.FileSourceMessage_RemoveFileSourceRequest{ + RemoveFileSourceRequest: &messagespb.RemoveFileSourceRequest{ + ID: utils.ProtoFromUUID(id), + }, + }, + }, + }, + } + msg, err := fileSourceReq.Marshal() + if err != nil { + return err + } + + return m.agtMgr.MessageActiveAgents(msg) +} + +func (m *Manager) deleteFileSource(id uuid.UUID) error { + return m.ts.DeleteFileSource(id) +} + +// CreateFileSource creates and stores info about the given file source. +func (m *Manager) CreateFileSource(fileSourceName string, fileSourceDeployment *ir.FileSourceDeployment) (*uuid.UUID, error) { + // Check to see if a file source with the matching name already exists. + resp, err := m.ts.GetFileSourcesWithNames([]string{fileSourceName}) + if err != nil { + return nil, err + } + + if len(resp) != 1 { + return nil, errors.New("Could not fetch fileSource") + } + prevFileSourceID := resp[0] + + ttl, err := types.DurationFromProto(fileSourceDeployment.TTL) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to parse duration: %+v", err)) + } + + if prevFileSourceID != nil { // Existing file source already exists. + prevFileSource, err := m.ts.GetFileSource(*prevFileSourceID) + if err != nil { + return nil, err + } + if prevFileSource != nil && prevFileSource.ExpectedState != statuspb.TERMINATED_STATE { + // If everything is exactly the same, no need to redeploy + // - return prevFileSourceID, ErrFileSourceAlreadyExists + // If anything inside file sources has changed + // - delete old file sources, and insert new file sources. + + // Check if the file sources are exactly the same. 
+ allFsSame := true + if !proto.Equal(prevFileSource.FileSource, fileSourceDeployment) { + allFsSame = false + } + + if allFsSame { + err = m.ts.SetFileSourceTTL(*prevFileSourceID, ttl) + if err != nil { + return nil, err + } + return prevFileSourceID, ErrFileSourceAlreadyExists + } + + // Something has changed, so trigger termination of the old file source. + err = m.ts.DeleteFileSourceTTLs([]uuid.UUID{*prevFileSourceID}) + if err != nil { + return nil, err + } + } + } + + fsID, err := uuid.NewV4() + if err != nil { + return nil, err + } + newFileSource := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(fsID), + Name: fileSourceName, + FileSource: fileSourceDeployment, + ExpectedState: statuspb.RUNNING_STATE, + } + err = m.ts.UpsertFileSource(fsID, newFileSource) + if err != nil { + return nil, err + } + err = m.ts.SetFileSourceTTL(fsID, ttl) + if err != nil { + return nil, err + } + err = m.ts.SetFileSourceWithName(fileSourceName, fsID) + if err != nil { + return nil, err + } + return &fsID, nil +} + +// GetAllFileSources gets all the file sources currently tracked by the metadata service. +func (m *Manager) GetAllFileSources() ([]*storepb.FileSourceInfo, error) { + return m.ts.GetFileSources() +} + +// UpdateAgentFileSourceStatus updates the file source info with the new agent file source status. +func (m *Manager) UpdateAgentFileSourceStatus(fileSourceID *uuidpb.UUID, agentID *uuidpb.UUID, state statuspb.LifeCycleState, status *statuspb.Status) error { + if state == statuspb.TERMINATED_STATE { // If all agent file source statuses are now terminated, we can finally delete the file source from the datastore. 
+ tID := utils.UUIDFromProtoOrNil(fileSourceID) + states, err := m.GetFileSourceStates(tID) + if err != nil { + return err + } + allTerminated := true + for _, s := range states { + if s.State != statuspb.TERMINATED_STATE && !s.AgentID.Equal(agentID) { + allTerminated = false + break + } + } + + if allTerminated { + return m.deleteFileSource(tID) + } + } + + fileSourceState := &storepb.AgentFileSourceStatus{ + State: state, + Status: status, + ID: fileSourceID, + AgentID: agentID, + } + + return m.ts.UpdateFileSourceState(fileSourceState) +} + +// RegisterFileSource sends requests to the given agents to register the specified file source. +func (m *Manager) RegisterFileSource(agents []*agentpb.Agent, fileSourceID uuid.UUID, fileSourceDeployment *ir.FileSourceDeployment) error { + agentIDs := make([]uuid.UUID, len(agents)) + fileSourceReq := messagespb.VizierMessage{ + Msg: &messagespb.VizierMessage_FileSourceMessage{ + FileSourceMessage: &messagespb.FileSourceMessage{ + Msg: &messagespb.FileSourceMessage_RegisterFileSourceRequest{ + RegisterFileSourceRequest: &messagespb.RegisterFileSourceRequest{ + FileSourceDeployment: fileSourceDeployment, + ID: utils.ProtoFromUUID(fileSourceID), + }, + }, + }, + }, + } + msg, err := fileSourceReq.Marshal() + if err != nil { + return err + } + for i, agt := range agents { + agentIDs[i] = utils.UUIDFromProtoOrNil(agt.Info.AgentID) + } + + err = m.agtMgr.MessageAgents(agentIDs, msg) + + if err != nil { + return err + } + + return nil +} + +// GetFileSourceInfo gets the status for the file source with the given ID. +func (m *Manager) GetFileSourceInfo(fileSourceID uuid.UUID) (*storepb.FileSourceInfo, error) { + return m.ts.GetFileSource(fileSourceID) +} + +// GetFileSourceStates gets all the known agent states for the given file source. 
+func (m *Manager) GetFileSourceStates(fileSourceID uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) { + return m.ts.GetFileSourceStates(fileSourceID) +} + +// GetFileSourcesForIDs gets all the file source infos for the given ids. +func (m *Manager) GetFileSourcesForIDs(ids []uuid.UUID) ([]*storepb.FileSourceInfo, error) { + return m.ts.GetFileSourcesForIDs(ids) +} + +// RemoveFileSources starts the termination process for the file sources with the given names. +func (m *Manager) RemoveFileSources(names []string) error { + fsIDs, err := m.ts.GetFileSourcesWithNames(names) + if err != nil { + return err + } + + ids := make([]uuid.UUID, len(fsIDs)) + + for i, id := range fsIDs { + if id == nil { + return fmt.Errorf("Could not find file source for given name: %s", names[i]) + } + ids[i] = *id + } + + return m.ts.DeleteFileSourceTTLs(ids) +} + +// DeleteAgent deletes file sources on the given agent. +func (m *Manager) DeleteAgent(agentID uuid.UUID) error { + return m.ts.DeleteFileSourcesForAgent(agentID) +} + +// Close cleans up the goroutines created and renders this no longer useable. +func (m *Manager) Close() { + m.once.Do(func() { + close(m.done) + }) + m.ts = nil + m.agtMgr = nil +} diff --git a/src/vizier/services/metadata/controllers/file_source/file_source_store.go b/src/vizier/services/metadata/controllers/file_source/file_source_store.go new file mode 100644 index 00000000000..8ad9d729a0a --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/file_source_store.go @@ -0,0 +1,309 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package file_source + +import ( + "path" + "strings" + "time" + + "github.com/gofrs/uuid" + "github.com/gogo/protobuf/proto" + "golang.org/x/sync/errgroup" + + "px.dev/pixie/src/api/proto/uuidpb" + "px.dev/pixie/src/utils" + "px.dev/pixie/src/vizier/services/metadata/storepb" + "px.dev/pixie/src/vizier/utils/datastore" +) + +const ( + fileSourcesPrefix = "/fileSource/" + fileSourceStatesPrefix = "/fileSourceStates/" + fileSourceTTLsPrefix = "/fileSourceTTL/" + fileSourceNamesPrefix = "/fileSourceName/" +) + +// Datastore implements the FileSourceStore interface on a given Datastore. 
+type Datastore struct { + ds datastore.MultiGetterSetterDeleterCloser +} + +// NewDatastore wraps the datastore in a file source store +func NewDatastore(ds datastore.MultiGetterSetterDeleterCloser) *Datastore { + return &Datastore{ds: ds} +} + +func getFileSourceWithNameKey(fileSourceName string) string { + return path.Join(fileSourceNamesPrefix, fileSourceName) +} + +func getFileSourceKey(fileSourceID uuid.UUID) string { + return path.Join(fileSourcesPrefix, fileSourceID.String()) +} + +func getFileSourceStatesKey(fileSourceID uuid.UUID) string { + return path.Join(fileSourceStatesPrefix, fileSourceID.String()) +} + +func getFileSourceStateKey(fileSourceID uuid.UUID, agentID uuid.UUID) string { + return path.Join(fileSourceStatesPrefix, fileSourceID.String(), agentID.String()) +} + +func getFileSourceTTLKey(fileSourceID uuid.UUID) string { + return path.Join(fileSourceTTLsPrefix, fileSourceID.String()) +} + +// GetFileSourcesWithNames gets which file source is associated with the given name. +func (t *Datastore) GetFileSourcesWithNames(fileSourceNames []string) ([]*uuid.UUID, error) { + eg := errgroup.Group{} + ids := make([]*uuid.UUID, len(fileSourceNames)) + for i := 0; i < len(fileSourceNames); i++ { + i := i // Closure for goroutine + eg.Go(func() error { + val, err := t.ds.Get(getFileSourceWithNameKey(fileSourceNames[i])) + if err != nil { + return err + } + if val == nil { + return nil + } + uuidPB := &uuidpb.UUID{} + err = proto.Unmarshal(val, uuidPB) + if err != nil { + return err + } + id := utils.UUIDFromProtoOrNil(uuidPB) + ids[i] = &id + return nil + }) + } + err := eg.Wait() + if err != nil { + return nil, err + } + + return ids, nil +} + +// SetFileSourceWithName associates the file source with the given name with the one with the provided ID. 
+func (t *Datastore) SetFileSourceWithName(fileSourceName string, fileSourceID uuid.UUID) error {
+	fileSourceIDpb := utils.ProtoFromUUID(fileSourceID)
+	val, err := fileSourceIDpb.Marshal()
+	if err != nil {
+		return err
+	}
+
+	return t.ds.Set(getFileSourceWithNameKey(fileSourceName), string(val))
+}
+
+// UpsertFileSource updates or creates a new file source entry in the store.
+func (t *Datastore) UpsertFileSource(fileSourceID uuid.UUID, fileSourceInfo *storepb.FileSourceInfo) error {
+	val, err := fileSourceInfo.Marshal()
+	if err != nil {
+		return err
+	}
+
+	return t.ds.Set(getFileSourceKey(fileSourceID), string(val))
+}
+
+// DeleteFileSource deletes the file source from the store.
+func (t *Datastore) DeleteFileSource(fileSourceID uuid.UUID) error {
+	err := t.ds.DeleteAll([]string{getFileSourceKey(fileSourceID)})
+	if err != nil {
+		return err
+	}
+
+	return t.ds.DeleteWithPrefix(getFileSourceStatesKey(fileSourceID))
+}
+
+// GetFileSource gets the file source info from the store, if it exists.
+func (t *Datastore) GetFileSource(fileSourceID uuid.UUID) (*storepb.FileSourceInfo, error) {
+	resp, err := t.ds.Get(getFileSourceKey(fileSourceID))
+	if err != nil {
+		return nil, err
+	}
+	if resp == nil {
+		return nil, nil
+	}
+
+	fileSourcePb := &storepb.FileSourceInfo{}
+	err = proto.Unmarshal(resp, fileSourcePb)
+	if err != nil {
+		return nil, err
+	}
+	return fileSourcePb, nil
+}
+
+// GetFileSources gets all of the file sources in the store.
+func (t *Datastore) GetFileSources() ([]*storepb.FileSourceInfo, error) {
+	_, vals, err := t.ds.GetWithPrefix(fileSourcesPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	fileSources := make([]*storepb.FileSourceInfo, len(vals))
+	for i, val := range vals {
+		pb := &storepb.FileSourceInfo{}
+		err := proto.Unmarshal(val, pb)
+		if err != nil {
+			continue
+		}
+		fileSources[i] = pb
+	}
+	return fileSources, nil
+}
+
+// GetFileSourcesForIDs gets all of the file sources with the given ids.
+func (t *Datastore) GetFileSourcesForIDs(ids []uuid.UUID) ([]*storepb.FileSourceInfo, error) { + eg := errgroup.Group{} + fileSources := make([]*storepb.FileSourceInfo, len(ids)) + for i := 0; i < len(ids); i++ { + i := i // Closure for goroutine + eg.Go(func() error { + val, err := t.ds.Get(getFileSourceKey(ids[i])) + if err != nil { + return err + } + if val == nil { + return nil + } + fs := &storepb.FileSourceInfo{} + err = proto.Unmarshal(val, fs) + if err != nil { + return err + } + fileSources[i] = fs + return nil + }) + } + + err := eg.Wait() + if err != nil { + return nil, err + } + + return fileSources, nil +} + +// UpdateFileSourceState updates the agent file source state in the store. +func (t *Datastore) UpdateFileSourceState(state *storepb.AgentFileSourceStatus) error { + val, err := state.Marshal() + if err != nil { + return err + } + + fsID := utils.UUIDFromProtoOrNil(state.ID) + + return t.ds.Set(getFileSourceStateKey(fsID, utils.UUIDFromProtoOrNil(state.AgentID)), string(val)) +} + +// GetFileSourceStates gets all the agentFileSource states for the given file source . +func (t *Datastore) GetFileSourceStates(fileSourceID uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) { + _, vals, err := t.ds.GetWithPrefix(getFileSourceStatesKey(fileSourceID)) + if err != nil { + return nil, err + } + + fileSources := make([]*storepb.AgentFileSourceStatus, len(vals)) + for i, val := range vals { + pb := &storepb.AgentFileSourceStatus{} + err := proto.Unmarshal(val, pb) + if err != nil { + continue + } + fileSources[i] = pb + } + return fileSources, nil +} + +// SetFileSourceTTL creates a key in the datastore with the given TTL. This represents the amount of time +// that the given file source should be persisted before terminating. 
+func (t *Datastore) SetFileSourceTTL(fileSourceID uuid.UUID, ttl time.Duration) error {
+	expiresAt := time.Now().Add(ttl)
+	encodedExpiry, err := expiresAt.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	return t.ds.SetWithTTL(getFileSourceTTLKey(fileSourceID), string(encodedExpiry), ttl)
+}
+
+// DeleteFileSourceTTLs deletes the key in the datastore for the given file source TTLs.
+// This is done as a single transaction, so if any deletes fail, they all fail.
+func (t *Datastore) DeleteFileSourceTTLs(ids []uuid.UUID) error {
+	keys := make([]string, len(ids))
+	for i, id := range ids {
+		keys[i] = getFileSourceTTLKey(id)
+	}
+
+	return t.ds.DeleteAll(keys)
+}
+
+// DeleteFileSourcesForAgent deletes the file sources for a given agent.
+// Note this only purges the combo file source ID+agentID keys. Said
+// file sources might still be valid and deployed on other agents.
+func (t *Datastore) DeleteFileSourcesForAgent(agentID uuid.UUID) error {
+	fss, err := t.GetFileSources()
+	if err != nil {
+		return err
+	}
+
+	delKeys := make([]string, len(fss))
+	for i, fs := range fss {
+		delKeys[i] = getFileSourceStateKey(utils.UUIDFromProtoOrNil(fs.ID), agentID)
+	}
+
+	return t.ds.DeleteAll(delKeys)
+}
+
+// GetFileSourceTTLs gets the file sources which still have existing TTLs.
+func (t *Datastore) GetFileSourceTTLs() ([]uuid.UUID, []time.Time, error) {
+	keys, vals, err := t.ds.GetWithPrefix(fileSourceTTLsPrefix)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var ids []uuid.UUID
+	var expirations []time.Time
+
+	for i, k := range keys {
+		keyParts := strings.Split(k, "/")
+		if len(keyParts) != 3 {
+			continue
+		}
+		id, err := uuid.FromString(keyParts[2])
+		if err != nil {
+			continue
+		}
+		var expiresAt time.Time
+		err = expiresAt.UnmarshalBinary(vals[i])
+		if err != nil {
+			// This shouldn't happen for new keys, but we might have added TTLs
+			// in the past without a value. So just pick some time sufficiently
+			// in the future.
+ // This value is only used to determine what file source s are expired + // as of _NOW_ so this is "safe". + expiresAt = time.Now().Add(30 * 24 * time.Hour) + } + ids = append(ids, id) + expirations = append(expirations, expiresAt) + } + + return ids, expirations, nil +} diff --git a/src/vizier/services/metadata/controllers/file_source/file_source_store_test.go b/src/vizier/services/metadata/controllers/file_source/file_source_store_test.go new file mode 100644 index 00000000000..f43caa8271e --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/file_source_store_test.go @@ -0,0 +1,364 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package file_source + +import ( + "os" + "testing" + "time" + + "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/vfs" + "github.com/gofrs/uuid" + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "px.dev/pixie/src/api/proto/uuidpb" + "px.dev/pixie/src/common/base/statuspb" + "px.dev/pixie/src/utils" + "px.dev/pixie/src/vizier/services/metadata/storepb" + "px.dev/pixie/src/vizier/utils/datastore/pebbledb" +) + +func setupTest(t *testing.T) (*pebbledb.DataStore, *Datastore, func()) { + memFS := vfs.NewMem() + c, err := pebble.Open("test", &pebble.Options{ + FS: memFS, + }) + if err != nil { + t.Fatal("failed to initialize a pebbledb") + os.Exit(1) + } + + db := pebbledb.New(c, 3*time.Second) + ts := NewDatastore(db) + cleanup := func() { + err := db.Close() + if err != nil { + t.Fatal("Failed to close db") + } + } + + return db, ts, cleanup +} + +func TestFileSourceStore_UpsertFileSource(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + tpID := uuid.Must(uuid.NewV4()) + // Create file sources. + s1 := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(tpID), + } + + err := ts.UpsertFileSource(tpID, s1) + require.NoError(t, err) + + savedFileSource, err := db.Get("/fileSource/" + tpID.String()) + require.NoError(t, err) + savedFileSourcePb := &storepb.FileSourceInfo{} + err = proto.Unmarshal(savedFileSource, savedFileSourcePb) + require.NoError(t, err) + assert.Equal(t, s1, savedFileSourcePb) +} + +func TestFileSourceStore_GetFileSource(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + tpID := uuid.Must(uuid.NewV4()) + // Create file sources. 
+ s1 := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(tpID), + } + s1Text, err := s1.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + err = db.Set("/fileSource/"+tpID.String(), string(s1Text)) + require.NoError(t, err) + + fileSource, err := ts.GetFileSource(tpID) + require.NoError(t, err) + assert.NotNil(t, fileSource) + + assert.Equal(t, s1.ID, fileSource.ID) +} + +func TestFileSourceStore_GetFileSources(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + // Create file sources. + s1ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c8") + s1 := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(s1ID), + } + s1Text, err := s1.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + s2ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c9") + s2 := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(s2ID), + } + s2Text, err := s2.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + err = db.Set("/fileSource/"+s1ID.String(), string(s1Text)) + require.NoError(t, err) + err = db.Set("/fileSource/"+s2ID.String(), string(s2Text)) + require.NoError(t, err) + + fileSources, err := ts.GetFileSources() + require.NoError(t, err) + assert.Equal(t, 2, len(fileSources)) + + ids := make([]string, len(fileSources)) + for i, tp := range fileSources { + ids[i] = utils.ProtoToUUIDStr(tp.ID) + } + + assert.Contains(t, ids, utils.ProtoToUUIDStr(s1.ID)) + assert.Contains(t, ids, utils.ProtoToUUIDStr(s2.ID)) +} + +func TestFileSourceStore_GetFileSourcesForIDs(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + // Create file sources. 
+ s1ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c8") + s1 := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(s1ID), + } + s1Text, err := s1.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + s2ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c9") + s2 := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(s2ID), + } + s2Text, err := s2.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + s3ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c7") + + err = db.Set("/fileSource/"+s1ID.String(), string(s1Text)) + require.NoError(t, err) + err = db.Set("/fileSource/"+s2ID.String(), string(s2Text)) + require.NoError(t, err) + + fileSources, err := ts.GetFileSourcesForIDs([]uuid.UUID{s1ID, s2ID, s3ID}) + require.NoError(t, err) + assert.Equal(t, 3, len(fileSources)) + + ids := make([]string, len(fileSources)) + for i, tp := range fileSources { + if tp == nil || tp.ID == nil { + continue + } + ids[i] = utils.ProtoToUUIDStr(tp.ID) + } + + assert.Contains(t, ids, utils.ProtoToUUIDStr(s1.ID)) + assert.Contains(t, ids, utils.ProtoToUUIDStr(s2.ID)) +} + +func TestFileSourceStore_UpdateFileSourceState(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + agentID := uuid.Must(uuid.NewV4()) + tpID := uuid.Must(uuid.NewV4()) + // Create file source state + s1 := &storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(tpID), + AgentID: utils.ProtoFromUUID(agentID), + State: statuspb.RUNNING_STATE, + } + + err := ts.UpdateFileSourceState(s1) + require.NoError(t, err) + + savedFileSource, err := db.Get("/fileSourceStates/" + tpID.String() + "/" + agentID.String()) + require.NoError(t, err) + savedFileSourcePb := &storepb.AgentFileSourceStatus{} + err = proto.Unmarshal(savedFileSource, savedFileSourcePb) + require.NoError(t, err) + assert.Equal(t, s1, savedFileSourcePb) +} + +func TestFileSourceStore_GetFileSourceStates(t *testing.T) { + db, ts, cleanup := 
setupTest(t) + defer cleanup() + + tpID := uuid.Must(uuid.NewV4()) + + agentID1 := uuid.FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + agentID2 := uuid.FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c9") + + // Create file sources. + s1 := &storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(tpID), + AgentID: utils.ProtoFromUUID(agentID1), + State: statuspb.RUNNING_STATE, + } + s1Text, err := s1.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + s2 := &storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(tpID), + AgentID: utils.ProtoFromUUID(agentID2), + State: statuspb.PENDING_STATE, + } + s2Text, err := s2.Marshal() + if err != nil { + t.Fatal("Unable to marshal file source pb") + } + + err = db.Set("/fileSourceStates/"+tpID.String()+"/"+agentID1.String(), string(s1Text)) + require.NoError(t, err) + err = db.Set("/fileSourceStates/"+tpID.String()+"/"+agentID2.String(), string(s2Text)) + require.NoError(t, err) + + fileSources, err := ts.GetFileSourceStates(tpID) + require.NoError(t, err) + assert.Equal(t, 2, len(fileSources)) + + agentIDs := make([]string, len(fileSources)) + for i, tp := range fileSources { + agentIDs[i] = utils.ProtoToUUIDStr(tp.AgentID) + } + + assert.Contains(t, agentIDs, utils.ProtoToUUIDStr(s1.AgentID)) + assert.Contains(t, agentIDs, utils.ProtoToUUIDStr(s2.AgentID)) +} + +func TestFileSourceStore_SetFileSourceWithName(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + tpID := uuid.Must(uuid.NewV4()) + + err := ts.SetFileSourceWithName("test", tpID) + require.NoError(t, err) + + savedFileSource, err := db.Get("/fileSourceName/test") + require.NoError(t, err) + savedFileSourcePb := &uuidpb.UUID{} + err = proto.Unmarshal(savedFileSource, savedFileSourcePb) + require.NoError(t, err) + assert.Equal(t, tpID, utils.UUIDFromProtoOrNil(savedFileSourcePb)) +} + +func TestFileSourceStore_GetFileSourcesWithNames(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer 
cleanup() + + tpID := uuid.Must(uuid.NewV4()) + fileSourceIDpb := utils.ProtoFromUUID(tpID) + val, err := fileSourceIDpb.Marshal() + require.NoError(t, err) + + tpID2 := uuid.Must(uuid.NewV4()) + fileSourceIDpb2 := utils.ProtoFromUUID(tpID2) + val2, err := fileSourceIDpb2.Marshal() + require.NoError(t, err) + + err = db.Set("/fileSourceName/test", string(val)) + require.NoError(t, err) + err = db.Set("/fileSourceName/test2", string(val2)) + require.NoError(t, err) + + fileSources, err := ts.GetFileSourcesWithNames([]string{"test", "test2"}) + require.NoError(t, err) + assert.Equal(t, 2, len(fileSources)) + + tps := make([]string, len(fileSources)) + for i, tp := range fileSources { + tps[i] = tp.String() + } + + assert.Contains(t, tps, tpID.String()) + assert.Contains(t, tps, tpID2.String()) +} + +func TestFileSourceStore_DeleteFileSource(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + tpID := uuid.Must(uuid.NewV4()) + + err := db.Set("/fileSource/"+tpID.String(), "test") + require.NoError(t, err) + + err = ts.DeleteFileSource(tpID) + require.NoError(t, err) + + val, err := db.Get("/fileSource/" + tpID.String()) + require.NoError(t, err) + assert.Nil(t, val) +} + +func TestFileSourceStore_DeleteFileSourceTTLs(t *testing.T) { + _, ts, cleanup := setupTest(t) + defer cleanup() + + tpID := uuid.Must(uuid.NewV4()) + tpID2 := uuid.Must(uuid.NewV4()) + + err := ts.DeleteFileSourceTTLs([]uuid.UUID{tpID, tpID2}) + require.NoError(t, err) +} + +func TestFileSourceStore_GetFileSourceTTLs(t *testing.T) { + db, ts, cleanup := setupTest(t) + defer cleanup() + + // Create file sources. 
+ s1ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c8") + s2ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c9") + + err := db.Set("/fileSourceTTL/"+s1ID.String(), "") + require.NoError(t, err) + err = db.Set("/fileSourceTTL/"+s2ID.String(), "") + require.NoError(t, err) + err = db.Set("/fileSourceTTL/invalid", "") + require.NoError(t, err) + + fileSources, _, err := ts.GetFileSourceTTLs() + require.NoError(t, err) + assert.Equal(t, 2, len(fileSources)) + + assert.Contains(t, fileSources, s1ID) + assert.Contains(t, fileSources, s2ID) +} diff --git a/src/vizier/services/metadata/controllers/file_source/file_source_test.go b/src/vizier/services/metadata/controllers/file_source/file_source_test.go new file mode 100644 index 00000000000..f6ac693bca1 --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/file_source_test.go @@ -0,0 +1,528 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package file_source_test + +import ( + "sync" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "px.dev/pixie/src/carnot/planner/file_source/ir" + "px.dev/pixie/src/common/base/statuspb" + "px.dev/pixie/src/utils" + "px.dev/pixie/src/vizier/messages/messagespb" + mock_agent "px.dev/pixie/src/vizier/services/metadata/controllers/agent/mock" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" + mock_file_source "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock" + "px.dev/pixie/src/vizier/services/metadata/storepb" + "px.dev/pixie/src/vizier/services/shared/agentpb" +) + +func TestCreateFileSource(t *testing.T) { + tests := []struct { + name string + originalFileSource *ir.FileSourceDeployment + originalFileSourceState statuspb.LifeCycleState + newFileSource *ir.FileSourceDeployment + expectError bool + expectOldUpdated bool + expectTTLUpdateOnly bool + }{ + { + name: "test_file_source", + originalFileSource: nil, + newFileSource: &ir.FileSourceDeployment{ + GlobPattern: "/tmp/test", + TableName: "/tmp/test", + TTL: &types.Duration{ + Seconds: 5, + }, + }, + expectError: false, + }, + { + name: "existing file source match", + originalFileSource: &ir.FileSourceDeployment{ + GlobPattern: "/tmp/test", + TableName: "/tmp/test", + TTL: &types.Duration{ + Seconds: 5, + }, + }, + originalFileSourceState: statuspb.RUNNING_STATE, + newFileSource: &ir.FileSourceDeployment{ + GlobPattern: "/tmp/test", + TableName: "/tmp/test", + TTL: &types.Duration{ + Seconds: 5, + }, + }, + expectTTLUpdateOnly: true, + }, + { + name: "existing file source, not exactly the same (1)", + originalFileSource: &ir.FileSourceDeployment{ + GlobPattern: "/tmp/test", + TableName: "/tmp/test", + TTL: &types.Duration{ + Seconds: 5, 
+ }, + }, + originalFileSourceState: statuspb.RUNNING_STATE, + newFileSource: &ir.FileSourceDeployment{ + GlobPattern: "/tmp/test.json", + TableName: "/tmp/test", + TTL: &types.Duration{ + Seconds: 5, + }, + }, + expectOldUpdated: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Set up mock. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + origID := uuid.Must(uuid.NewV4()) + + if test.originalFileSource == nil { + mockFileSourceStore. + EXPECT(). + GetFileSourcesWithNames([]string{"test_file_source"}). + Return([]*uuid.UUID{nil}, nil) + } else { + mockFileSourceStore. + EXPECT(). + GetFileSourcesWithNames([]string{"test_file_source"}). + Return([]*uuid.UUID{&origID}, nil) + mockFileSourceStore. + EXPECT(). + GetFileSource(origID). + Return(&storepb.FileSourceInfo{ + ExpectedState: test.originalFileSourceState, + FileSource: test.originalFileSource, + }, nil) + } + + if test.expectTTLUpdateOnly { + mockFileSourceStore. + EXPECT(). + SetFileSourceTTL(origID, time.Second*5) + } + + if test.expectOldUpdated { + mockFileSourceStore. + EXPECT(). + DeleteFileSourceTTLs([]uuid.UUID{origID}). + Return(nil) + } + + var newID uuid.UUID + + if !test.expectError && !test.expectTTLUpdateOnly { + mockFileSourceStore. + EXPECT(). + UpsertFileSource(gomock.Any(), gomock.Any()). + DoAndReturn(func(id uuid.UUID, tpInfo *storepb.FileSourceInfo) error { + newID = id + assert.Equal(t, &storepb.FileSourceInfo{ + FileSource: test.newFileSource, + Name: "test_file_source", + ID: utils.ProtoFromUUID(id), + ExpectedState: statuspb.RUNNING_STATE, + }, tpInfo) + return nil + }) + + mockFileSourceStore. + EXPECT(). + SetFileSourceWithName("test_file_source", gomock.Any()). + DoAndReturn(func(name string, id uuid.UUID) error { + assert.Equal(t, newID, id) + return nil + }) + + mockFileSourceStore. + EXPECT(). + SetFileSourceTTL(gomock.Any(), time.Second*5). 
+ DoAndReturn(func(id uuid.UUID, ttl time.Duration) error { + assert.Equal(t, newID, id) + return nil + }) + } + + mockAgtMgr := mock_agent.NewMockManager(ctrl) + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + actualFsID, err := fileSourceMgr.CreateFileSource("test_file_source", test.newFileSource) + if test.expectError || test.expectTTLUpdateOnly { + assert.Equal(t, file_source.ErrFileSourceAlreadyExists, err) + } else { + require.NoError(t, err) + assert.Equal(t, &newID, actualFsID) + } + }) + } +} + +func TestGetFileSources(t *testing.T) { + // Set up mock. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + tID1 := uuid.Must(uuid.NewV4()) + tID2 := uuid.Must(uuid.NewV4()) + expectedFileSourceInfo := []*storepb.FileSourceInfo{ + { + ID: utils.ProtoFromUUID(tID1), + }, + { + ID: utils.ProtoFromUUID(tID2), + }, + } + + mockFileSourceStore. + EXPECT(). + GetFileSources(). + Return(expectedFileSourceInfo, nil) + + fileSources, err := fileSourceMgr.GetAllFileSources() + require.NoError(t, err) + assert.Equal(t, expectedFileSourceInfo, fileSources) +} + +func TestGetFileSourceInfo(t *testing.T) { + // Set up mock. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + fsID1 := uuid.Must(uuid.NewV4()) + expectedFileSourceInfo := &storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(fsID1), + } + + mockFileSourceStore. + EXPECT(). + GetFileSource(fsID1). 
+ Return(expectedFileSourceInfo, nil) + + fileSources, err := fileSourceMgr.GetFileSourceInfo(fsID1) + require.NoError(t, err) + assert.Equal(t, expectedFileSourceInfo, fileSources) +} + +func TestGetFileSourceStates(t *testing.T) { + // Set up mock. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + agentUUID1 := uuid.Must(uuid.NewV4()) + tID1 := uuid.Must(uuid.NewV4()) + expectedFileSourceStatus1 := &storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(tID1), + AgentID: utils.ProtoFromUUID(agentUUID1), + State: statuspb.RUNNING_STATE, + } + + agentUUID2 := uuid.Must(uuid.NewV4()) + expectedFileSourceStatus2 := &storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(tID1), + AgentID: utils.ProtoFromUUID(agentUUID2), + State: statuspb.PENDING_STATE, + } + + mockFileSourceStore. + EXPECT(). + GetFileSourceStates(tID1). + Return([]*storepb.AgentFileSourceStatus{expectedFileSourceStatus1, expectedFileSourceStatus2}, nil) + + fileSources, err := fileSourceMgr.GetFileSourceStates(tID1) + require.NoError(t, err) + assert.Equal(t, expectedFileSourceStatus1, fileSources[0]) + assert.Equal(t, expectedFileSourceStatus2, fileSources[1]) +} + +func TestRegisterFileSource(t *testing.T) { + // Set up mock. 
+ ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + agentUUID1 := uuid.Must(uuid.NewV4()) + agentUUID2 := uuid.Must(uuid.NewV4()) + upb1 := utils.ProtoFromUUID(agentUUID1) + upb2 := utils.ProtoFromUUID(agentUUID2) + mockAgents := []*agentpb.Agent{ + // Should match programUpTo5.18.0 and programFrom5.10.0To5.18.0 + { + Info: &agentpb.AgentInfo{ + AgentID: upb1, + }, + }, + { + Info: &agentpb.AgentInfo{ + AgentID: upb2, + }, + }, + } + + fileSourceID := uuid.Must(uuid.NewV4()) + fileSourceDeployment := &ir.FileSourceDeployment{} + expectedFileSourceReq := messagespb.VizierMessage{ + Msg: &messagespb.VizierMessage_FileSourceMessage{ + FileSourceMessage: &messagespb.FileSourceMessage{ + Msg: &messagespb.FileSourceMessage_RegisterFileSourceRequest{ + RegisterFileSourceRequest: &messagespb.RegisterFileSourceRequest{ + FileSourceDeployment: fileSourceDeployment, + ID: utils.ProtoFromUUID(fileSourceID), + }, + }, + }, + }, + } + // Serialize file source request proto into byte slice to compare with the actual message sent to agents. + msg1, err := expectedFileSourceReq.Marshal() + if err != nil { + t.Fatal(err) + } + + mockAgtMgr. + EXPECT(). + MessageAgents([]uuid.UUID{agentUUID1, agentUUID2}, msg1). + Return(nil) + + err = fileSourceMgr.RegisterFileSource(mockAgents, fileSourceID, fileSourceDeployment) + require.NoError(t, err) +} + +func TestUpdateAgentFileSourceStatus(t *testing.T) { + // Set up mock. 
+ ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + agentUUID1 := uuid.Must(uuid.NewV4()) + fsID := uuid.Must(uuid.NewV4()) + expectedFileSourceState := &storepb.AgentFileSourceStatus{ + ID: utils.ProtoFromUUID(fsID), + AgentID: utils.ProtoFromUUID(agentUUID1), + State: statuspb.RUNNING_STATE, + } + + mockFileSourceStore. + EXPECT(). + UpdateFileSourceState(expectedFileSourceState). + Return(nil) + + err := fileSourceMgr.UpdateAgentFileSourceStatus(utils.ProtoFromUUID(fsID), utils.ProtoFromUUID(agentUUID1), statuspb.RUNNING_STATE, nil) + require.NoError(t, err) +} + +func TestUpdateAgentFileSourceStatus_Terminated(t *testing.T) { + // Set up mock. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + agentUUID1 := uuid.Must(uuid.NewV4()) + fsID := uuid.Must(uuid.NewV4()) + agentUUID2 := uuid.Must(uuid.NewV4()) + + mockFileSourceStore. + EXPECT(). + GetFileSourceStates(fsID). + Return([]*storepb.AgentFileSourceStatus{ + {AgentID: utils.ProtoFromUUID(agentUUID1), State: statuspb.TERMINATED_STATE}, + {AgentID: utils.ProtoFromUUID(agentUUID2), State: statuspb.RUNNING_STATE}, + }, nil) + + mockFileSourceStore. + EXPECT(). + DeleteFileSource(fsID). + Return(nil) + + err := fileSourceMgr.UpdateAgentFileSourceStatus(utils.ProtoFromUUID(fsID), utils.ProtoFromUUID(agentUUID2), statuspb.TERMINATED_STATE, nil) + require.NoError(t, err) +} + +func TestTTLExpiration(t *testing.T) { + // Set up mock. 
+ ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + defer fileSourceMgr.Close() + + agentUUID1 := uuid.Must(uuid.NewV4()) + fsID := uuid.Must(uuid.NewV4()) + agentUUID2 := uuid.Must(uuid.NewV4()) + + mockFileSourceStore. + EXPECT(). + GetFileSourceStates(fsID). + Return([]*storepb.AgentFileSourceStatus{ + {AgentID: utils.ProtoFromUUID(agentUUID1), State: statuspb.TERMINATED_STATE}, + {AgentID: utils.ProtoFromUUID(agentUUID2), State: statuspb.RUNNING_STATE}, + }, nil) + + mockFileSourceStore. + EXPECT(). + DeleteFileSource(fsID). + Return(nil) + + err := fileSourceMgr.UpdateAgentFileSourceStatus(utils.ProtoFromUUID(fsID), utils.ProtoFromUUID(agentUUID2), statuspb.TERMINATED_STATE, nil) + require.NoError(t, err) +} + +func TestUpdateAgentFileSourceStatus_RemoveFileSources(t *testing.T) { + // Set up mock. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockAgtMgr := mock_agent.NewMockManager(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + + fsID1 := uuid.Must(uuid.NewV4()) + fsID2 := uuid.Must(uuid.NewV4()) + fsID3 := uuid.Must(uuid.NewV4()) + fsID4 := uuid.Must(uuid.NewV4()) + + mockFileSourceStore. + EXPECT(). + GetFileSources(). + Return([]*storepb.FileSourceInfo{ + { + ID: utils.ProtoFromUUID(fsID1), + }, + { + ID: utils.ProtoFromUUID(fsID2), + }, + { + ID: utils.ProtoFromUUID(fsID3), + }, + { + ID: utils.ProtoFromUUID(fsID4), + ExpectedState: statuspb.TERMINATED_STATE, + }, + }, nil) + + mockFileSourceStore. + EXPECT(). + GetFileSourceTTLs(). + Return([]uuid.UUID{ + fsID1, + fsID3, + fsID4, + }, []time.Time{ + time.Now().Add(1 * time.Hour), + time.Now().Add(-1 * time.Minute), + time.Now().Add(-1 * time.Hour), + }, nil) + + mockFileSourceStore. + EXPECT(). + GetFileSource(fsID2). 
+ Return(&storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(fsID2), + }, nil) + + mockFileSourceStore. + EXPECT(). + GetFileSource(fsID3). + Return(&storepb.FileSourceInfo{ + ID: utils.ProtoFromUUID(fsID3), + }, nil) + + mockFileSourceStore. + EXPECT(). + UpsertFileSource(fsID2, &storepb.FileSourceInfo{ID: utils.ProtoFromUUID(fsID2), ExpectedState: statuspb.TERMINATED_STATE}). + Return(nil) + + mockFileSourceStore. + EXPECT(). + UpsertFileSource(fsID3, &storepb.FileSourceInfo{ID: utils.ProtoFromUUID(fsID3), ExpectedState: statuspb.TERMINATED_STATE}). + Return(nil) + + var wg sync.WaitGroup + wg.Add(2) + + var seenDeletions []string + msgHandler := func(msg []byte) error { + vzMsg := &messagespb.VizierMessage{} + err := proto.Unmarshal(msg, vzMsg) + require.NoError(t, err) + req := vzMsg.GetFileSourceMessage().GetRemoveFileSourceRequest() + assert.NotNil(t, req) + seenDeletions = append(seenDeletions, utils.ProtoToUUIDStr(req.ID)) + + wg.Done() + return nil + } + + mockAgtMgr. + EXPECT(). + MessageActiveAgents(gomock.Any()). + Times(2). + DoAndReturn(msgHandler) + + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 25*time.Millisecond) + defer fileSourceMgr.Close() + + wg.Wait() + assert.Contains(t, seenDeletions, fsID2.String()) + assert.Contains(t, seenDeletions, fsID3.String()) +} diff --git a/src/vizier/services/metadata/controllers/file_source/mock.go b/src/vizier/services/metadata/controllers/file_source/mock.go new file mode 100644 index 00000000000..d0ccdbec1e2 --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/mock.go @@ -0,0 +1,21 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package file_source + +//go:generate mockgen -source=file_source.go -destination=mock/mock_file_source.gen.go Store diff --git a/src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel b/src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel new file mode 100644 index 00000000000..fd215aac86e --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel @@ -0,0 +1,29 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "mock", + srcs = ["mock_file_source.gen.go"], + importpath = "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock", + visibility = ["//src/vizier:__subpackages__"], + deps = [ + "//src/vizier/services/metadata/storepb:store_pl_go_proto", + "@com_github_gofrs_uuid//:uuid", + "@com_github_golang_mock//gomock", + ], +) diff --git a/src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go b/src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go new file mode 100644 index 00000000000..9ce88669a98 --- /dev/null +++ b/src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go @@ -0,0 +1,277 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: file_source.go + +// Package mock_file_source is a generated GoMock package. +package mock_file_source + +import ( + reflect "reflect" + time "time" + + uuid "github.com/gofrs/uuid" + gomock "github.com/golang/mock/gomock" + storepb "px.dev/pixie/src/vizier/services/metadata/storepb" +) + +// MockagentMessenger is a mock of agentMessenger interface. +type MockagentMessenger struct { + ctrl *gomock.Controller + recorder *MockagentMessengerMockRecorder +} + +// MockagentMessengerMockRecorder is the mock recorder for MockagentMessenger. +type MockagentMessengerMockRecorder struct { + mock *MockagentMessenger +} + +// NewMockagentMessenger creates a new mock instance. +func NewMockagentMessenger(ctrl *gomock.Controller) *MockagentMessenger { + mock := &MockagentMessenger{ctrl: ctrl} + mock.recorder = &MockagentMessengerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockagentMessenger) EXPECT() *MockagentMessengerMockRecorder { + return m.recorder +} + +// MessageActiveAgents mocks base method. 
+func (m *MockagentMessenger) MessageActiveAgents(msg []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageActiveAgents", msg) + ret0, _ := ret[0].(error) + return ret0 +} + +// MessageActiveAgents indicates an expected call of MessageActiveAgents. +func (mr *MockagentMessengerMockRecorder) MessageActiveAgents(msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageActiveAgents", reflect.TypeOf((*MockagentMessenger)(nil).MessageActiveAgents), msg) +} + +// MessageAgents mocks base method. +func (m *MockagentMessenger) MessageAgents(agentIDs []uuid.UUID, msg []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MessageAgents", agentIDs, msg) + ret0, _ := ret[0].(error) + return ret0 +} + +// MessageAgents indicates an expected call of MessageAgents. +func (mr *MockagentMessengerMockRecorder) MessageAgents(agentIDs, msg interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageAgents", reflect.TypeOf((*MockagentMessenger)(nil).MessageAgents), agentIDs, msg) +} + +// MockStore is a mock of Store interface. +type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// DeleteFileSource mocks base method. 
+func (m *MockStore) DeleteFileSource(arg0 uuid.UUID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteFileSource", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteFileSource indicates an expected call of DeleteFileSource. +func (mr *MockStoreMockRecorder) DeleteFileSource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSource", reflect.TypeOf((*MockStore)(nil).DeleteFileSource), arg0) +} + +// DeleteFileSourceTTLs mocks base method. +func (m *MockStore) DeleteFileSourceTTLs(arg0 []uuid.UUID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteFileSourceTTLs", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteFileSourceTTLs indicates an expected call of DeleteFileSourceTTLs. +func (mr *MockStoreMockRecorder) DeleteFileSourceTTLs(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSourceTTLs", reflect.TypeOf((*MockStore)(nil).DeleteFileSourceTTLs), arg0) +} + +// DeleteFileSourcesForAgent mocks base method. +func (m *MockStore) DeleteFileSourcesForAgent(arg0 uuid.UUID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteFileSourcesForAgent", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteFileSourcesForAgent indicates an expected call of DeleteFileSourcesForAgent. +func (mr *MockStoreMockRecorder) DeleteFileSourcesForAgent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSourcesForAgent", reflect.TypeOf((*MockStore)(nil).DeleteFileSourcesForAgent), arg0) +} + +// GetFileSource mocks base method. 
+func (m *MockStore) GetFileSource(arg0 uuid.UUID) (*storepb.FileSourceInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileSource", arg0) + ret0, _ := ret[0].(*storepb.FileSourceInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFileSource indicates an expected call of GetFileSource. +func (mr *MockStoreMockRecorder) GetFileSource(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSource", reflect.TypeOf((*MockStore)(nil).GetFileSource), arg0) +} + +// GetFileSourceStates mocks base method. +func (m *MockStore) GetFileSourceStates(arg0 uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileSourceStates", arg0) + ret0, _ := ret[0].([]*storepb.AgentFileSourceStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFileSourceStates indicates an expected call of GetFileSourceStates. +func (mr *MockStoreMockRecorder) GetFileSourceStates(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourceStates", reflect.TypeOf((*MockStore)(nil).GetFileSourceStates), arg0) +} + +// GetFileSourceTTLs mocks base method. +func (m *MockStore) GetFileSourceTTLs() ([]uuid.UUID, []time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileSourceTTLs") + ret0, _ := ret[0].([]uuid.UUID) + ret1, _ := ret[1].([]time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetFileSourceTTLs indicates an expected call of GetFileSourceTTLs. +func (mr *MockStoreMockRecorder) GetFileSourceTTLs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourceTTLs", reflect.TypeOf((*MockStore)(nil).GetFileSourceTTLs)) +} + +// GetFileSources mocks base method. 
+func (m *MockStore) GetFileSources() ([]*storepb.FileSourceInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileSources") + ret0, _ := ret[0].([]*storepb.FileSourceInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFileSources indicates an expected call of GetFileSources. +func (mr *MockStoreMockRecorder) GetFileSources() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSources", reflect.TypeOf((*MockStore)(nil).GetFileSources)) +} + +// GetFileSourcesForIDs mocks base method. +func (m *MockStore) GetFileSourcesForIDs(arg0 []uuid.UUID) ([]*storepb.FileSourceInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileSourcesForIDs", arg0) + ret0, _ := ret[0].([]*storepb.FileSourceInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFileSourcesForIDs indicates an expected call of GetFileSourcesForIDs. +func (mr *MockStoreMockRecorder) GetFileSourcesForIDs(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourcesForIDs", reflect.TypeOf((*MockStore)(nil).GetFileSourcesForIDs), arg0) +} + +// GetFileSourcesWithNames mocks base method. +func (m *MockStore) GetFileSourcesWithNames(arg0 []string) ([]*uuid.UUID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFileSourcesWithNames", arg0) + ret0, _ := ret[0].([]*uuid.UUID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFileSourcesWithNames indicates an expected call of GetFileSourcesWithNames. +func (mr *MockStoreMockRecorder) GetFileSourcesWithNames(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourcesWithNames", reflect.TypeOf((*MockStore)(nil).GetFileSourcesWithNames), arg0) +} + +// SetFileSourceTTL mocks base method. 
+func (m *MockStore) SetFileSourceTTL(arg0 uuid.UUID, arg1 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFileSourceTTL", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetFileSourceTTL indicates an expected call of SetFileSourceTTL. +func (mr *MockStoreMockRecorder) SetFileSourceTTL(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFileSourceTTL", reflect.TypeOf((*MockStore)(nil).SetFileSourceTTL), arg0, arg1) +} + +// SetFileSourceWithName mocks base method. +func (m *MockStore) SetFileSourceWithName(arg0 string, arg1 uuid.UUID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetFileSourceWithName", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetFileSourceWithName indicates an expected call of SetFileSourceWithName. +func (mr *MockStoreMockRecorder) SetFileSourceWithName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFileSourceWithName", reflect.TypeOf((*MockStore)(nil).SetFileSourceWithName), arg0, arg1) +} + +// UpdateFileSourceState mocks base method. +func (m *MockStore) UpdateFileSourceState(arg0 *storepb.AgentFileSourceStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateFileSourceState", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateFileSourceState indicates an expected call of UpdateFileSourceState. +func (mr *MockStoreMockRecorder) UpdateFileSourceState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFileSourceState", reflect.TypeOf((*MockStore)(nil).UpdateFileSourceState), arg0) +} + +// UpsertFileSource mocks base method. 
+func (m *MockStore) UpsertFileSource(arg0 uuid.UUID, arg1 *storepb.FileSourceInfo) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertFileSource", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertFileSource indicates an expected call of UpsertFileSource. +func (mr *MockStoreMockRecorder) UpsertFileSource(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertFileSource", reflect.TypeOf((*MockStore)(nil).UpsertFileSource), arg0, arg1) +} diff --git a/src/vizier/services/metadata/controllers/message_bus.go b/src/vizier/services/metadata/controllers/message_bus.go index fafee905dbc..2a2be881592 100644 --- a/src/vizier/services/metadata/controllers/message_bus.go +++ b/src/vizier/services/metadata/controllers/message_bus.go @@ -23,6 +23,7 @@ import ( log "github.com/sirupsen/logrus" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/k8smeta" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" ) @@ -52,9 +53,8 @@ type MessageBusController struct { // NewMessageBusController creates a new controller for handling NATS messages. 
func NewMessageBusController(conn *nats.Conn, agtMgr agent.Manager, - tpMgr *tracepoint.Manager, k8smetaHandler *k8smeta.Handler, - isLeader *bool, -) (*MessageBusController, error) { + tpMgr *tracepoint.Manager, fsMgr *file_source.Manager, k8smetaHandler *k8smeta.Handler, + isLeader *bool) (*MessageBusController, error) { ch := make(chan *nats.Msg, 8192) listeners := make(map[string]TopicListener) subscriptions := make([]*nats.Subscription, 0) @@ -67,7 +67,7 @@ func NewMessageBusController(conn *nats.Conn, agtMgr agent.Manager, subscriptions: subscriptions, } - err := mc.registerListeners(agtMgr, tpMgr, k8smetaHandler) + err := mc.registerListeners(agtMgr, tpMgr, fsMgr, k8smetaHandler) if err != nil { return nil, err } @@ -110,9 +110,9 @@ func (mc *MessageBusController) handleMessages() { } } -func (mc *MessageBusController) registerListeners(agtMgr agent.Manager, tpMgr *tracepoint.Manager, k8smetaHandler *k8smeta.Handler) error { +func (mc *MessageBusController) registerListeners(agtMgr agent.Manager, tpMgr *tracepoint.Manager, fsMgr *file_source.Manager, k8smetaHandler *k8smeta.Handler) error { // Register AgentTopicListener. 
- atl, err := NewAgentTopicListener(agtMgr, tpMgr, mc.sendMessage) + atl, err := NewAgentTopicListener(agtMgr, tpMgr, fsMgr, mc.sendMessage) if err != nil { return err } diff --git a/src/vizier/services/metadata/controllers/server.go b/src/vizier/services/metadata/controllers/server.go index 8c4a11eebe9..384ab215451 100644 --- a/src/vizier/services/metadata/controllers/server.go +++ b/src/vizier/services/metadata/controllers/server.go @@ -41,6 +41,7 @@ import ( "px.dev/pixie/src/table_store/schemapb" "px.dev/pixie/src/utils" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/k8smeta" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" "px.dev/pixie/src/vizier/services/metadata/metadataenv" @@ -61,6 +62,7 @@ type Server struct { pls k8smeta.PodLabelStore agtMgr agent.Manager tpMgr *tracepoint.Manager + fsMgr *file_source.Manager // The current cursor that is actively running the GetAgentsUpdate stream. Only one GetAgentsUpdate // stream should be running at a time. getAgentsCursor uuid.UUID @@ -68,13 +70,14 @@ type Server struct { } // NewServer creates GRPC handlers. 
-func NewServer(env metadataenv.MetadataEnv, ds datastore.MultiGetterSetterDeleterCloser, pls k8smeta.PodLabelStore, agtMgr agent.Manager, tpMgr *tracepoint.Manager) *Server { +func NewServer(env metadataenv.MetadataEnv, ds datastore.MultiGetterSetterDeleterCloser, pls k8smeta.PodLabelStore, agtMgr agent.Manager, tpMgr *tracepoint.Manager, fsMgr *file_source.Manager) *Server { return &Server{ env: env, ds: ds, pls: pls, agtMgr: agtMgr, tpMgr: tpMgr, + fsMgr: fsMgr, } } @@ -98,6 +101,9 @@ func convertToRelationMap(computedSchema *storepb.ComputedSchema) (*schemapb.Sch Columns: columnPbs, Desc: schema.Desc, } + if schema.MutationId != "" { + schemaPb.MutationId = schema.MutationId + } respSchemaPb.RelationMap[schema.Name] = schemaPb } @@ -121,6 +127,9 @@ func convertToSchemaInfo(computedSchema *storepb.ComputedSchema) ([]*distributed schemaPb := &schemapb.Relation{ Columns: columnPbs, } + if schema.MutationId != "" { + schemaPb.MutationId = schema.MutationId + } agentIDs, ok := computedSchema.TableNameToAgentIDs[schema.Name] if !ok { @@ -565,6 +574,55 @@ func getTracepointStateFromAgentTracepointStates(agentStates []*storepb.AgentTra return statuspb.UNKNOWN_STATE, []*statuspb.Status{} } +func getFileSourceStateFromAgentFileSourceStates(agentStates []*storepb.AgentFileSourceStatus) (statuspb.LifeCycleState, []*statuspb.Status) { + if len(agentStates) == 0 { + return statuspb.PENDING_STATE, nil + } + + numFailed := 0 + numTerminated := 0 + numPending := 0 + numRunning := 0 + statuses := make([]*statuspb.Status, 0) + + for _, s := range agentStates { + switch s.State { + case statuspb.TERMINATED_STATE: + numTerminated++ + case statuspb.FAILED_STATE: + numFailed++ + if s.Status.ErrCode != statuspb.FAILED_PRECONDITION && s.Status.ErrCode != statuspb.OK { + statuses = append(statuses, s.Status) + } + case statuspb.PENDING_STATE: + numPending++ + case statuspb.RUNNING_STATE: + numRunning++ + } + } + + if numTerminated > 0 { // If any agentFileSources are terminated, then we 
consider the tracepoint in an terminated state. + return statuspb.TERMINATED_STATE, []*statuspb.Status{} + } + + if numRunning > 0 { // If a single agentFileSource is running, then we consider the overall tracepoint as healthy. + return statuspb.RUNNING_STATE, []*statuspb.Status{} + } + + if numPending > 0 { // If no agentFileSources are running, but some are in a pending state, the tracepoint is pending. + return statuspb.PENDING_STATE, []*statuspb.Status{} + } + + if numFailed > 0 { // If there are no terminated/running/pending tracepoints, then the tracepoint is failed. + if len(statuses) == 0 { + return statuspb.FAILED_STATE, []*statuspb.Status{agentStates[0].Status} // If there are no non FAILED_PRECONDITION statuses, just use the error from the first agent. + } + return statuspb.FAILED_STATE, statuses + } + + return statuspb.UNKNOWN_STATE, []*statuspb.Status{} +} + // RemoveTracepoint is a request to evict the given tracepoint on all agents. func (s *Server) RemoveTracepoint(ctx context.Context, req *metadatapb.RemoveTracepointRequest) (*metadatapb.RemoveTracepointResponse, error) { err := s.tpMgr.RemoveTracepoints(req.Names) @@ -579,6 +637,132 @@ func (s *Server) RemoveTracepoint(ctx context.Context, req *metadatapb.RemoveTra }, nil } +// RegisterFileSource is a request to register the file sources specified in the FileSourceDeployment on all agents. +func (s *Server) RegisterFileSource(ctx context.Context, req *metadatapb.RegisterFileSourceRequest) (*metadatapb.RegisterFileSourceResponse, error) { + responses := make([]*metadatapb.RegisterFileSourceResponse_FileSourceStatus, len(req.Requests)) + + // Create file source. + for i, fs := range req.Requests { + // TODO(ddelnano): Consider adding support for filtering by labels. 
+ fileSourceID, err := s.fsMgr.CreateFileSource(fs.Name, fs) + if err != nil && err != file_source.ErrFileSourceAlreadyExists { + return nil, err + } + if err == file_source.ErrFileSourceAlreadyExists { + responses[i] = &metadatapb.RegisterFileSourceResponse_FileSourceStatus{ + ID: utils.ProtoFromUUID(*fileSourceID), + Status: &statuspb.Status{ + ErrCode: statuspb.ALREADY_EXISTS, + }, + Name: fs.Name, + } + continue + } + + responses[i] = &metadatapb.RegisterFileSourceResponse_FileSourceStatus{ + ID: utils.ProtoFromUUID(*fileSourceID), + Status: &statuspb.Status{ + ErrCode: statuspb.OK, + }, + Name: fs.Name, + } + + // Get all agents currently running. + agents, err := s.agtMgr.GetActiveAgents() + if err != nil { + return nil, err + } + + err = s.fsMgr.RegisterFileSource(agents, *fileSourceID, fs) + if err != nil { + return nil, err + } + } + + resp := &metadatapb.RegisterFileSourceResponse{ + FileSources: responses, + Status: &statuspb.Status{ + ErrCode: statuspb.OK, + }, + } + + return resp, nil +} + +// GetFileSourceInfo is a request to check the status for the given file source. +func (s *Server) GetFileSourceInfo(ctx context.Context, req *metadatapb.GetFileSourceInfoRequest) (*metadatapb.GetFileSourceInfoResponse, error) { + var fileSourceInfos []*storepb.FileSourceInfo + var err error + if len(req.IDs) > 0 { + ids := make([]uuid.UUID, len(req.IDs)) + for i, id := range req.IDs { + ids[i] = utils.UUIDFromProtoOrNil(id) + } + + fileSourceInfos, err = s.fsMgr.GetFileSourcesForIDs(ids) + } else { + fileSourceInfos, err = s.fsMgr.GetAllFileSources() + } + + if err != nil { + return nil, err + } + + fileSourceState := make([]*metadatapb.GetFileSourceInfoResponse_FileSourceState, len(fileSourceInfos)) + + for i, fs := range fileSourceInfos { + if fs == nil { // FileSourceDeployment does not exist. 
+ fileSourceState[i] = &metadatapb.GetFileSourceInfoResponse_FileSourceState{ + ID: req.IDs[i], + State: statuspb.UNKNOWN_STATE, + Statuses: []*statuspb.Status{{ + ErrCode: statuspb.NOT_FOUND, + }}, + } + continue + } + tUUID := utils.UUIDFromProtoOrNil(fs.ID) + + fileSourceStates, err := s.fsMgr.GetFileSourceStates(tUUID) + if err != nil { + return nil, err + } + + state, statuses := getFileSourceStateFromAgentFileSourceStates(fileSourceStates) + + // TODO(ddelnano): For now file sources only have one schema + schemas := make([]string, 1) + schemas[0] = fs.FileSource.TableName + + fileSourceState[i] = &metadatapb.GetFileSourceInfoResponse_FileSourceState{ + ID: fs.ID, + State: state, + Statuses: statuses, + Name: fs.Name, + ExpectedState: fs.ExpectedState, + SchemaNames: schemas, + } + } + + return &metadatapb.GetFileSourceInfoResponse{ + FileSources: fileSourceState, + }, nil +} + +// RemoveFileSource is a request to evict the given file sources on all agents. +func (s *Server) RemoveFileSource(ctx context.Context, req *metadatapb.RemoveFileSourceRequest) (*metadatapb.RemoveFileSourceResponse, error) { + err := s.fsMgr.RemoveFileSources(req.Names) + if err != nil { + return nil, err + } + + return &metadatapb.RemoveFileSourceResponse{ + Status: &statuspb.Status{ + ErrCode: statuspb.OK, + }, + }, nil +} + // UpdateConfig updates the config for the specified agent. 
func (s *Server) UpdateConfig(ctx context.Context, req *metadatapb.UpdateConfigRequest) (*metadatapb.UpdateConfigResponse, error) { splitName := strings.Split(req.AgentPodName, "/") diff --git a/src/vizier/services/metadata/controllers/server_test.go b/src/vizier/services/metadata/controllers/server_test.go index 9a9dc844c9a..bfa36e4d2c9 100644 --- a/src/vizier/services/metadata/controllers/server_test.go +++ b/src/vizier/services/metadata/controllers/server_test.go @@ -55,6 +55,8 @@ import ( "px.dev/pixie/src/vizier/messages/messagespb" "px.dev/pixie/src/vizier/services/metadata/controllers" mock_agent "px.dev/pixie/src/vizier/services/metadata/controllers/agent/mock" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" + mock_file_source "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock" "px.dev/pixie/src/vizier/services/metadata/controllers/testutils" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" mock_tracepoint "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint/mock" @@ -65,7 +67,7 @@ import ( ) func testTableInfos() []*storepb.TableInfo { - tableInfos := make([]*storepb.TableInfo, 2) + tableInfos := make([]*storepb.TableInfo, 3) schema1Cols := make([]*storepb.TableInfo_ColumnInfo, 3) schema1Cols[0] = &storepb.TableInfo_ColumnInfo{ @@ -100,6 +102,17 @@ func testTableInfos() []*storepb.TableInfo { Columns: schema2Cols, Desc: "table 2 desc", } + schema3Cols := make([]*storepb.TableInfo_ColumnInfo, 1) + schema3Cols[0] = &storepb.TableInfo_ColumnInfo{ + Name: "t3Col1", + DataType: 1, + } + tableInfos[2] = &storepb.TableInfo{ + Name: "table3", + Columns: schema3Cols, + Desc: "table 3 desc", + MutationId: "mutation id", + } return tableInfos } @@ -165,7 +178,7 @@ func TestGetAgentInfo(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil, nil) req := 
metadatapb.AgentInfoRequest{} @@ -211,7 +224,7 @@ func TestGetAgentInfoGetActiveAgentsFailed(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil, nil) req := metadatapb.AgentInfoRequest{} @@ -240,7 +253,7 @@ func TestGetSchemas(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil, nil) req := metadatapb.SchemaRequest{} @@ -249,7 +262,7 @@ func TestGetSchemas(t *testing.T) { require.NoError(t, err) assert.NotNil(t, resp) - assert.Equal(t, 2, len(resp.Schema.RelationMap)) + assert.Equal(t, 3, len(resp.Schema.RelationMap)) assert.Equal(t, "table 1 desc", resp.Schema.RelationMap["table1"].Desc) assert.Equal(t, 3, len(resp.Schema.RelationMap["table1"].Columns)) assert.Equal(t, "t1Col1", resp.Schema.RelationMap["table1"].Columns[0].ColumnName) @@ -348,7 +361,7 @@ func Test_Server_RegisterTracepoint(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, nil) reqs := []*metadatapb.RegisterTracepointRequest_TracepointRequest{ { @@ -473,7 +486,7 @@ func Test_Server_RegisterTracepoint_Exists(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, nil) reqs := []*metadatapb.RegisterTracepointRequest_TracepointRequest{ { @@ -613,8 +626,10 @@ func Test_Server_GetTracepointInfo(t *testing.T) { defer ctrl.Finish() mockAgtMgr := mock_agent.NewMockManager(ctrl) mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) tracepointMgr := 
tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) program := &logicalpb.TracepointDeployment{ Programs: []*logicalpb.TracepointDeployment_TracepointProgram{ @@ -658,7 +673,7 @@ func Test_Server_GetTracepointInfo(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, fileSourceMgr) req := metadatapb.GetTracepointInfoRequest{ IDs: []*uuidpb.UUID{utils.ProtoFromUUID(tID)}, } @@ -692,8 +707,10 @@ func Test_Server_RemoveTracepoint(t *testing.T) { defer ctrl.Finish() mockAgtMgr := mock_agent.NewMockManager(ctrl) mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) tracepointMgr := tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) + fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) tpID1 := uuid.Must(uuid.NewV4()) tpID2 := uuid.Must(uuid.NewV4()) @@ -716,7 +733,7 @@ func Test_Server_RemoveTracepoint(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, fileSourceMgr) req := metadatapb.RemoveTracepointRequest{ Names: []string{"test1", "test2"}, @@ -831,6 +848,9 @@ func TestGetAgentUpdates(t *testing.T) { "table2": { AgentID: []*uuidpb.UUID{u1pb}, }, + "table3": { + AgentID: []*uuidpb.UUID{u1pb, u2pb}, + }, }, } @@ -902,7 +922,7 @@ func TestGetAgentUpdates(t *testing.T) { t.Fatal("Failed to create api environment.") } - srv := controllers.NewServer(mdEnv, nil, nil, mockAgtMgr, nil) + srv := controllers.NewServer(mdEnv, nil, nil, mockAgtMgr, nil, nil) env := env.New("withpixie.ai") s := server.CreateGRPCServer(env, &server.GRPCServerOptions{}) @@ 
-1012,7 +1032,7 @@ func TestGetAgentUpdates(t *testing.T) { assert.Equal(t, 1, len(r1.AgentUpdates)) assert.Equal(t, updates1[2], r1.AgentUpdates[0]) // Check schemas - assert.Equal(t, 2, len(r1.AgentSchemas)) + assert.Equal(t, 3, len(r1.AgentSchemas)) assert.Equal(t, "table1", r1.AgentSchemas[0].Name) assert.Equal(t, 3, len(r1.AgentSchemas[0].Relation.Columns)) assert.Equal(t, 2, len(r1.AgentSchemas[0].AgentList)) @@ -1022,6 +1042,12 @@ func TestGetAgentUpdates(t *testing.T) { assert.Equal(t, 2, len(r1.AgentSchemas[1].Relation.Columns)) assert.Equal(t, 1, len(r1.AgentSchemas[1].AgentList)) assert.Equal(t, u1pb, r1.AgentSchemas[1].AgentList[0]) + assert.Equal(t, "table3", r1.AgentSchemas[2].Name) + assert.Equal(t, 1, len(r1.AgentSchemas[2].Relation.Columns)) + assert.Equal(t, 2, len(r1.AgentSchemas[2].AgentList)) + assert.Equal(t, u1pb, r1.AgentSchemas[2].AgentList[0]) + assert.Equal(t, u2pb, r1.AgentSchemas[2].AgentList[1]) + assert.Equal(t, "mutation id", r1.AgentSchemas[2].Relation.MutationId) // Check empty message r2 := resps[2] @@ -1052,6 +1078,9 @@ func Test_Server_UpdateConfig(t *testing.T) { mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) tracepointMgr := tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) + mockFileSourceStore := mock_file_source.NewMockStore(ctrl) + fsMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) + mockAgtMgr. EXPECT(). UpdateConfig("pl", "pem-1234", "gprof", "true"). 
@@ -1063,7 +1092,7 @@ func Test_Server_UpdateConfig(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, fsMgr) req := metadatapb.UpdateConfigRequest{ AgentPodName: "pl/pem-1234", @@ -1104,7 +1133,7 @@ func Test_Server_ConvertLabelsToPods(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, pls, nil, nil) + s := controllers.NewServer(env, nil, pls, nil, nil, nil) program := &logicalpb.TracepointDeployment{} err = proto.UnmarshalText(testutils.TDLabelSelectorPb, program) diff --git a/src/vizier/services/metadata/metadata_server.go b/src/vizier/services/metadata/metadata_server.go index b2959d80e98..75c5679d210 100644 --- a/src/vizier/services/metadata/metadata_server.go +++ b/src/vizier/services/metadata/metadata_server.go @@ -48,6 +48,7 @@ import ( "px.dev/pixie/src/vizier/services/metadata/controllers" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" "px.dev/pixie/src/vizier/services/metadata/controllers/cronscript" + "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/k8smeta" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" "px.dev/pixie/src/vizier/services/metadata/metadataenv" @@ -270,7 +271,12 @@ func main() { tracepointMgr := tracepoint.NewManager(tds, agtMgr, 30*time.Second) defer tracepointMgr.Close() - mc, err := controllers.NewMessageBusController(nc, agtMgr, tracepointMgr, + fds := file_source.NewDatastore(dataStore) + // Initialize file source handler. 
+ fsMgr := file_source.NewManager(fds, agtMgr, 30*time.Second) + defer fsMgr.Close() + + mc, err := controllers.NewMessageBusController(nc, agtMgr, tracepointMgr, fsMgr, mdh, &isLeader) if err != nil { log.WithError(err).Fatal("Failed to connect to message bus") @@ -286,7 +292,7 @@ func main() { healthz.RegisterDefaultChecks(mux) metrics.MustRegisterMetricsHandlerNoDefaultMetrics(mux) - svr := controllers.NewServer(env, dataStore, k8sMds, agtMgr, tracepointMgr) + svr := controllers.NewServer(env, dataStore, k8sMds, agtMgr, tracepointMgr, fsMgr) csDs := cronscript.NewDatastore(dataStore) cronScriptSvr := cronscript.New(csDs) @@ -301,6 +307,7 @@ func main() { httpmiddleware.WithBearerAuthMiddleware(env, mux), maxMsgSize) metadatapb.RegisterMetadataServiceServer(s.GRPCServer(), svr) metadatapb.RegisterMetadataTracepointServiceServer(s.GRPCServer(), svr) + metadatapb.RegisterMetadataFileSourceServiceServer(s.GRPCServer(), svr) metadatapb.RegisterMetadataConfigServiceServer(s.GRPCServer(), svr) metadatapb.RegisterCronScriptStoreServiceServer(s.GRPCServer(), cronScriptSvr) diff --git a/src/vizier/services/metadata/metadatapb/BUILD.bazel b/src/vizier/services/metadata/metadatapb/BUILD.bazel index 11b8b4962db..c1b4c41ae1b 100644 --- a/src/vizier/services/metadata/metadatapb/BUILD.bazel +++ b/src/vizier/services/metadata/metadatapb/BUILD.bazel @@ -24,6 +24,7 @@ pl_proto_library( "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", + "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/cvmsgspb:cvmsgs_pl_proto", "//src/shared/types/typespb:types_pl_proto", @@ -42,6 +43,7 @@ pl_cc_proto_library( "//src/api/proto/uuidpb:uuid_pl_cc_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", + 
"//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/cvmsgspb:cvmsgs_pl_cc_proto", "//src/shared/types/typespb/wrapper:cc_library", @@ -61,6 +63,7 @@ pl_go_proto_library( "//src/api/proto/uuidpb:uuid_pl_go_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/cvmsgspb:cvmsgs_pl_go_proto", "//src/shared/types/typespb:types_pl_go_proto", diff --git a/src/vizier/services/metadata/metadatapb/service.pb.go b/src/vizier/services/metadata/metadatapb/service.pb.go index 64e34931455..52f764c4892 100755 --- a/src/vizier/services/metadata/metadatapb/service.pb.go +++ b/src/vizier/services/metadata/metadatapb/service.pb.go @@ -20,6 +20,7 @@ import ( uuidpb "px.dev/pixie/src/api/proto/uuidpb" distributedpb "px.dev/pixie/src/carnot/planner/distributedpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" + ir "px.dev/pixie/src/carnot/planner/file_source/ir" statuspb "px.dev/pixie/src/common/base/statuspb" cvmsgspb "px.dev/pixie/src/shared/cvmsgspb" schemapb "px.dev/pixie/src/table_store/schemapb" @@ -624,21 +625,21 @@ func (m *WithPrefixKeyResponse_KV) GetValue() []byte { return nil } -type RegisterTracepointRequest struct { - Requests []*RegisterTracepointRequest_TracepointRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +type RegisterFileSourceRequest struct { + Requests []*ir.FileSourceDeployment `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (m *RegisterTracepointRequest) Reset() { *m = RegisterTracepointRequest{} } -func (*RegisterTracepointRequest) ProtoMessage() {} -func (*RegisterTracepointRequest) Descriptor() ([]byte, []int) { +func (m *RegisterFileSourceRequest) Reset() { *m = 
RegisterFileSourceRequest{} } +func (*RegisterFileSourceRequest) ProtoMessage() {} +func (*RegisterFileSourceRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{10} } -func (m *RegisterTracepointRequest) XXX_Unmarshal(b []byte) error { +func (m *RegisterFileSourceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterTracepointRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterFileSourceRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -648,102 +649,41 @@ func (m *RegisterTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *RegisterTracepointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointRequest.Merge(m, src) +func (m *RegisterFileSourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterFileSourceRequest.Merge(m, src) } -func (m *RegisterTracepointRequest) XXX_Size() int { +func (m *RegisterFileSourceRequest) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointRequest.DiscardUnknown(m) +func (m *RegisterFileSourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterFileSourceRequest.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointRequest proto.InternalMessageInfo +var xxx_messageInfo_RegisterFileSourceRequest proto.InternalMessageInfo -func (m *RegisterTracepointRequest) GetRequests() []*RegisterTracepointRequest_TracepointRequest { +func (m *RegisterFileSourceRequest) GetRequests() []*ir.FileSourceDeployment { if m != nil { return m.Requests } return nil } -type RegisterTracepointRequest_TracepointRequest struct { - 
TracepointDeployment *logicalpb.TracepointDeployment `protobuf:"bytes,1,opt,name=tracepoint_deployment,json=tracepointDeployment,proto3" json:"tracepoint_deployment,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - TTL *types.Duration `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` -} - -func (m *RegisterTracepointRequest_TracepointRequest) Reset() { - *m = RegisterTracepointRequest_TracepointRequest{} -} -func (*RegisterTracepointRequest_TracepointRequest) ProtoMessage() {} -func (*RegisterTracepointRequest_TracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{10, 0} -} -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Merge(m, src) -} -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Size() int { - return m.Size() -} -func (m *RegisterTracepointRequest_TracepointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RegisterTracepointRequest_TracepointRequest proto.InternalMessageInfo - -func (m *RegisterTracepointRequest_TracepointRequest) GetTracepointDeployment() *logicalpb.TracepointDeployment { - if m != nil { - return m.TracepointDeployment - } - return nil -} - -func (m *RegisterTracepointRequest_TracepointRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m 
*RegisterTracepointRequest_TracepointRequest) GetTTL() *types.Duration { - if m != nil { - return m.TTL - } - return nil -} - -type RegisterTracepointResponse struct { - Tracepoints []*RegisterTracepointResponse_TracepointStatus `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` +type RegisterFileSourceResponse struct { + FileSources []*RegisterFileSourceResponse_FileSourceStatus `protobuf:"bytes,1,rep,name=file_sources,json=fileSources,proto3" json:"file_sources,omitempty"` Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` } -func (m *RegisterTracepointResponse) Reset() { *m = RegisterTracepointResponse{} } -func (*RegisterTracepointResponse) ProtoMessage() {} -func (*RegisterTracepointResponse) Descriptor() ([]byte, []int) { +func (m *RegisterFileSourceResponse) Reset() { *m = RegisterFileSourceResponse{} } +func (*RegisterFileSourceResponse) ProtoMessage() {} +func (*RegisterFileSourceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{11} } -func (m *RegisterTracepointResponse) XXX_Unmarshal(b []byte) error { +func (m *RegisterFileSourceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterFileSourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterTracepointResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterFileSourceResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -753,51 +693,51 @@ func (m *RegisterTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ( return b[:n], nil } } -func (m *RegisterTracepointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointResponse.Merge(m, src) +func (m *RegisterFileSourceResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_RegisterFileSourceResponse.Merge(m, src) } -func (m *RegisterTracepointResponse) XXX_Size() int { +func (m *RegisterFileSourceResponse) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointResponse.DiscardUnknown(m) +func (m *RegisterFileSourceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterFileSourceResponse.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointResponse proto.InternalMessageInfo +var xxx_messageInfo_RegisterFileSourceResponse proto.InternalMessageInfo -func (m *RegisterTracepointResponse) GetTracepoints() []*RegisterTracepointResponse_TracepointStatus { +func (m *RegisterFileSourceResponse) GetFileSources() []*RegisterFileSourceResponse_FileSourceStatus { if m != nil { - return m.Tracepoints + return m.FileSources } return nil } -func (m *RegisterTracepointResponse) GetStatus() *statuspb.Status { +func (m *RegisterFileSourceResponse) GetStatus() *statuspb.Status { if m != nil { return m.Status } return nil } -type RegisterTracepointResponse_TracepointStatus struct { +type RegisterFileSourceResponse_FileSourceStatus struct { Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` ID *uuidpb.UUID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } -func (m *RegisterTracepointResponse_TracepointStatus) Reset() { - *m = RegisterTracepointResponse_TracepointStatus{} +func (m *RegisterFileSourceResponse_FileSourceStatus) Reset() { + *m = RegisterFileSourceResponse_FileSourceStatus{} } -func (*RegisterTracepointResponse_TracepointStatus) ProtoMessage() {} -func (*RegisterTracepointResponse_TracepointStatus) Descriptor() ([]byte, []int) { +func (*RegisterFileSourceResponse_FileSourceStatus) ProtoMessage() {} +func (*RegisterFileSourceResponse_FileSourceStatus) Descriptor() ([]byte, []int) { return 
fileDescriptor_bfe4468195647430, []int{11, 0} } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Unmarshal(b []byte) error { +func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -807,54 +747,54 @@ func (m *RegisterTracepointResponse_TracepointStatus) XXX_Marshal(b []byte, dete return b[:n], nil } } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Merge(m, src) +func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus.Merge(m, src) } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Size() int { +func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.DiscardUnknown(m) +func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointResponse_TracepointStatus proto.InternalMessageInfo +var xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus proto.InternalMessageInfo -func (m *RegisterTracepointResponse_TracepointStatus) GetStatus() *statuspb.Status { +func (m 
*RegisterFileSourceResponse_FileSourceStatus) GetStatus() *statuspb.Status { if m != nil { return m.Status } return nil } -func (m *RegisterTracepointResponse_TracepointStatus) GetID() *uuidpb.UUID { +func (m *RegisterFileSourceResponse_FileSourceStatus) GetID() *uuidpb.UUID { if m != nil { return m.ID } return nil } -func (m *RegisterTracepointResponse_TracepointStatus) GetName() string { +func (m *RegisterFileSourceResponse_FileSourceStatus) GetName() string { if m != nil { return m.Name } return "" } -type GetTracepointInfoRequest struct { +type GetFileSourceInfoRequest struct { IDs []*uuidpb.UUID `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` } -func (m *GetTracepointInfoRequest) Reset() { *m = GetTracepointInfoRequest{} } -func (*GetTracepointInfoRequest) ProtoMessage() {} -func (*GetTracepointInfoRequest) Descriptor() ([]byte, []int) { +func (m *GetFileSourceInfoRequest) Reset() { *m = GetFileSourceInfoRequest{} } +func (*GetFileSourceInfoRequest) ProtoMessage() {} +func (*GetFileSourceInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{12} } -func (m *GetTracepointInfoRequest) XXX_Unmarshal(b []byte) error { +func (m *GetFileSourceInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetTracepointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetFileSourceInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetTracepointInfoRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_GetFileSourceInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -864,40 +804,40 @@ func (m *GetTracepointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *GetTracepointInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTracepointInfoRequest.Merge(m, src) +func (m *GetFileSourceInfoRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_GetFileSourceInfoRequest.Merge(m, src) } -func (m *GetTracepointInfoRequest) XXX_Size() int { +func (m *GetFileSourceInfoRequest) XXX_Size() int { return m.Size() } -func (m *GetTracepointInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetTracepointInfoRequest.DiscardUnknown(m) +func (m *GetFileSourceInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetFileSourceInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetTracepointInfoRequest proto.InternalMessageInfo +var xxx_messageInfo_GetFileSourceInfoRequest proto.InternalMessageInfo -func (m *GetTracepointInfoRequest) GetIDs() []*uuidpb.UUID { +func (m *GetFileSourceInfoRequest) GetIDs() []*uuidpb.UUID { if m != nil { return m.IDs } return nil } -type GetTracepointInfoResponse struct { - Tracepoints []*GetTracepointInfoResponse_TracepointState `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` +type GetFileSourceInfoResponse struct { + FileSources []*GetFileSourceInfoResponse_FileSourceState `protobuf:"bytes,1,rep,name=file_sources,json=fileSources,proto3" json:"file_sources,omitempty"` } -func (m *GetTracepointInfoResponse) Reset() { *m = GetTracepointInfoResponse{} } -func (*GetTracepointInfoResponse) ProtoMessage() {} -func (*GetTracepointInfoResponse) Descriptor() ([]byte, []int) { +func (m *GetFileSourceInfoResponse) Reset() { *m = GetFileSourceInfoResponse{} } +func (*GetFileSourceInfoResponse) ProtoMessage() {} +func (*GetFileSourceInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{13} } -func (m *GetTracepointInfoResponse) XXX_Unmarshal(b []byte) error { +func (m *GetFileSourceInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetTracepointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetFileSourceInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return 
xxx_messageInfo_GetTracepointInfoResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetFileSourceInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -907,26 +847,26 @@ func (m *GetTracepointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *GetTracepointInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTracepointInfoResponse.Merge(m, src) +func (m *GetFileSourceInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetFileSourceInfoResponse.Merge(m, src) } -func (m *GetTracepointInfoResponse) XXX_Size() int { +func (m *GetFileSourceInfoResponse) XXX_Size() int { return m.Size() } -func (m *GetTracepointInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetTracepointInfoResponse.DiscardUnknown(m) +func (m *GetFileSourceInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetFileSourceInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetTracepointInfoResponse proto.InternalMessageInfo +var xxx_messageInfo_GetFileSourceInfoResponse proto.InternalMessageInfo -func (m *GetTracepointInfoResponse) GetTracepoints() []*GetTracepointInfoResponse_TracepointState { +func (m *GetFileSourceInfoResponse) GetFileSources() []*GetFileSourceInfoResponse_FileSourceState { if m != nil { - return m.Tracepoints + return m.FileSources } return nil } -type GetTracepointInfoResponse_TracepointState struct { +type GetFileSourceInfoResponse_FileSourceState struct { ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` State statuspb.LifeCycleState `protobuf:"varint,2,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` Statuses []*statuspb.Status `protobuf:"bytes,3,rep,name=statuses,proto3" json:"statuses,omitempty"` @@ -935,19 +875,19 @@ type GetTracepointInfoResponse_TracepointState struct { SchemaNames []string `protobuf:"bytes,6,rep,name=schema_names,json=schemaNames,proto3" json:"schema_names,omitempty"` 
} -func (m *GetTracepointInfoResponse_TracepointState) Reset() { - *m = GetTracepointInfoResponse_TracepointState{} +func (m *GetFileSourceInfoResponse_FileSourceState) Reset() { + *m = GetFileSourceInfoResponse_FileSourceState{} } -func (*GetTracepointInfoResponse_TracepointState) ProtoMessage() {} -func (*GetTracepointInfoResponse_TracepointState) Descriptor() ([]byte, []int) { +func (*GetFileSourceInfoResponse_FileSourceState) ProtoMessage() {} +func (*GetFileSourceInfoResponse_FileSourceState) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{13, 0} } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Unmarshal(b []byte) error { +func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Marshal(b, m, deterministic) + return xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -957,75 +897,75 @@ func (m *GetTracepointInfoResponse_TracepointState) XXX_Marshal(b []byte, determ return b[:n], nil } } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Merge(m, src) +func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState.Merge(m, src) } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Size() int { +func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Size() int { return m.Size() } -func (m *GetTracepointInfoResponse_TracepointState) XXX_DiscardUnknown() { - 
xxx_messageInfo_GetTracepointInfoResponse_TracepointState.DiscardUnknown(m) +func (m *GetFileSourceInfoResponse_FileSourceState) XXX_DiscardUnknown() { + xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState.DiscardUnknown(m) } -var xxx_messageInfo_GetTracepointInfoResponse_TracepointState proto.InternalMessageInfo +var xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState proto.InternalMessageInfo -func (m *GetTracepointInfoResponse_TracepointState) GetID() *uuidpb.UUID { +func (m *GetFileSourceInfoResponse_FileSourceState) GetID() *uuidpb.UUID { if m != nil { return m.ID } return nil } -func (m *GetTracepointInfoResponse_TracepointState) GetState() statuspb.LifeCycleState { +func (m *GetFileSourceInfoResponse_FileSourceState) GetState() statuspb.LifeCycleState { if m != nil { return m.State } return statuspb.UNKNOWN_STATE } -func (m *GetTracepointInfoResponse_TracepointState) GetStatuses() []*statuspb.Status { +func (m *GetFileSourceInfoResponse_FileSourceState) GetStatuses() []*statuspb.Status { if m != nil { return m.Statuses } return nil } -func (m *GetTracepointInfoResponse_TracepointState) GetName() string { +func (m *GetFileSourceInfoResponse_FileSourceState) GetName() string { if m != nil { return m.Name } return "" } -func (m *GetTracepointInfoResponse_TracepointState) GetExpectedState() statuspb.LifeCycleState { +func (m *GetFileSourceInfoResponse_FileSourceState) GetExpectedState() statuspb.LifeCycleState { if m != nil { return m.ExpectedState } return statuspb.UNKNOWN_STATE } -func (m *GetTracepointInfoResponse_TracepointState) GetSchemaNames() []string { +func (m *GetFileSourceInfoResponse_FileSourceState) GetSchemaNames() []string { if m != nil { return m.SchemaNames } return nil } -type RemoveTracepointRequest struct { +type RemoveFileSourceRequest struct { Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } -func (m *RemoveTracepointRequest) Reset() { *m = RemoveTracepointRequest{} } -func 
(*RemoveTracepointRequest) ProtoMessage() {} -func (*RemoveTracepointRequest) Descriptor() ([]byte, []int) { +func (m *RemoveFileSourceRequest) Reset() { *m = RemoveFileSourceRequest{} } +func (*RemoveFileSourceRequest) ProtoMessage() {} +func (*RemoveFileSourceRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{14} } -func (m *RemoveTracepointRequest) XXX_Unmarshal(b []byte) error { +func (m *RemoveFileSourceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RemoveTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RemoveFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RemoveTracepointRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RemoveFileSourceRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1035,40 +975,40 @@ func (m *RemoveTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *RemoveTracepointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveTracepointRequest.Merge(m, src) +func (m *RemoveFileSourceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveFileSourceRequest.Merge(m, src) } -func (m *RemoveTracepointRequest) XXX_Size() int { +func (m *RemoveFileSourceRequest) XXX_Size() int { return m.Size() } -func (m *RemoveTracepointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveTracepointRequest.DiscardUnknown(m) +func (m *RemoveFileSourceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveFileSourceRequest.DiscardUnknown(m) } -var xxx_messageInfo_RemoveTracepointRequest proto.InternalMessageInfo +var xxx_messageInfo_RemoveFileSourceRequest proto.InternalMessageInfo -func (m *RemoveTracepointRequest) GetNames() []string { +func (m *RemoveFileSourceRequest) GetNames() []string { if m != nil { return m.Names } return nil } -type RemoveTracepointResponse 
struct { +type RemoveFileSourceResponse struct { Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (m *RemoveTracepointResponse) Reset() { *m = RemoveTracepointResponse{} } -func (*RemoveTracepointResponse) ProtoMessage() {} -func (*RemoveTracepointResponse) Descriptor() ([]byte, []int) { +func (m *RemoveFileSourceResponse) Reset() { *m = RemoveFileSourceResponse{} } +func (*RemoveFileSourceResponse) ProtoMessage() {} +func (*RemoveFileSourceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{15} } -func (m *RemoveTracepointResponse) XXX_Unmarshal(b []byte) error { +func (m *RemoveFileSourceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RemoveTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RemoveFileSourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RemoveTracepointResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RemoveFileSourceResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1078,42 +1018,40 @@ func (m *RemoveTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *RemoveTracepointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveTracepointResponse.Merge(m, src) +func (m *RemoveFileSourceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveFileSourceResponse.Merge(m, src) } -func (m *RemoveTracepointResponse) XXX_Size() int { +func (m *RemoveFileSourceResponse) XXX_Size() int { return m.Size() } -func (m *RemoveTracepointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveTracepointResponse.DiscardUnknown(m) +func (m *RemoveFileSourceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveFileSourceResponse.DiscardUnknown(m) } -var xxx_messageInfo_RemoveTracepointResponse proto.InternalMessageInfo +var 
xxx_messageInfo_RemoveFileSourceResponse proto.InternalMessageInfo -func (m *RemoveTracepointResponse) GetStatus() *statuspb.Status { +func (m *RemoveFileSourceResponse) GetStatus() *statuspb.Status { if m != nil { return m.Status } return nil } -type UpdateConfigRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - AgentPodName string `protobuf:"bytes,3,opt,name=agent_pod_name,json=agentPodName,proto3" json:"agent_pod_name,omitempty"` +type RegisterTracepointRequest struct { + Requests []*RegisterTracepointRequest_TracepointRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } -func (*UpdateConfigRequest) ProtoMessage() {} -func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { +func (m *RegisterTracepointRequest) Reset() { *m = RegisterTracepointRequest{} } +func (*RegisterTracepointRequest) ProtoMessage() {} +func (*RegisterTracepointRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{16} } -func (m *UpdateConfigRequest) XXX_Unmarshal(b []byte) error { +func (m *RegisterTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *UpdateConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_UpdateConfigRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1123,54 +1061,44 @@ func (m *UpdateConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *UpdateConfigRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateConfigRequest.Merge(m, src) +func (m 
*RegisterTracepointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointRequest.Merge(m, src) } -func (m *UpdateConfigRequest) XXX_Size() int { +func (m *RegisterTracepointRequest) XXX_Size() int { return m.Size() } -func (m *UpdateConfigRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateConfigRequest.DiscardUnknown(m) +func (m *RegisterTracepointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointRequest.DiscardUnknown(m) } -var xxx_messageInfo_UpdateConfigRequest proto.InternalMessageInfo - -func (m *UpdateConfigRequest) GetKey() string { - if m != nil { - return m.Key - } - return "" -} +var xxx_messageInfo_RegisterTracepointRequest proto.InternalMessageInfo -func (m *UpdateConfigRequest) GetValue() string { +func (m *RegisterTracepointRequest) GetRequests() []*RegisterTracepointRequest_TracepointRequest { if m != nil { - return m.Value + return m.Requests } - return "" + return nil } -func (m *UpdateConfigRequest) GetAgentPodName() string { - if m != nil { - return m.AgentPodName - } - return "" +type RegisterTracepointRequest_TracepointRequest struct { + TracepointDeployment *logicalpb.TracepointDeployment `protobuf:"bytes,1,opt,name=tracepoint_deployment,json=tracepointDeployment,proto3" json:"tracepoint_deployment,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + TTL *types.Duration `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` } -type UpdateConfigResponse struct { - Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +func (m *RegisterTracepointRequest_TracepointRequest) Reset() { + *m = RegisterTracepointRequest_TracepointRequest{} } - -func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } -func (*UpdateConfigResponse) ProtoMessage() {} -func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{17} +func (*RegisterTracepointRequest_TracepointRequest) 
ProtoMessage() {} +func (*RegisterTracepointRequest_TracepointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{16, 0} } -func (m *UpdateConfigResponse) XXX_Unmarshal(b []byte) error { +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *UpdateConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_UpdateConfigResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1180,75 +1108,55 @@ func (m *UpdateConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *UpdateConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateConfigResponse.Merge(m, src) +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Merge(m, src) } -func (m *UpdateConfigResponse) XXX_Size() int { +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Size() int { return m.Size() } -func (m *UpdateConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateConfigResponse.DiscardUnknown(m) +func (m *RegisterTracepointRequest_TracepointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.DiscardUnknown(m) } -var xxx_messageInfo_UpdateConfigResponse proto.InternalMessageInfo +var xxx_messageInfo_RegisterTracepointRequest_TracepointRequest proto.InternalMessageInfo -func (m *UpdateConfigResponse) GetStatus() *statuspb.Status { +func (m *RegisterTracepointRequest_TracepointRequest) GetTracepointDeployment() *logicalpb.TracepointDeployment { if m != nil { - return m.Status + return 
m.TracepointDeployment } return nil } -type GetScriptsRequest struct { +func (m *RegisterTracepointRequest_TracepointRequest) GetName() string { + if m != nil { + return m.Name + } + return "" } -func (m *GetScriptsRequest) Reset() { *m = GetScriptsRequest{} } -func (*GetScriptsRequest) ProtoMessage() {} -func (*GetScriptsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{18} -} -func (m *GetScriptsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetScriptsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *RegisterTracepointRequest_TracepointRequest) GetTTL() *types.Duration { + if m != nil { + return m.TTL } + return nil } -func (m *GetScriptsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetScriptsRequest.Merge(m, src) -} -func (m *GetScriptsRequest) XXX_Size() int { - return m.Size() -} -func (m *GetScriptsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetScriptsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetScriptsRequest proto.InternalMessageInfo -type GetScriptsResponse struct { - Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +type RegisterTracepointResponse struct { + Tracepoints []*RegisterTracepointResponse_TracepointStatus `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` + Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` } -func (m *GetScriptsResponse) Reset() { *m = GetScriptsResponse{} } -func (*GetScriptsResponse) ProtoMessage() {} -func (*GetScriptsResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_bfe4468195647430, []int{19} +func (m *RegisterTracepointResponse) Reset() { *m = RegisterTracepointResponse{} } +func (*RegisterTracepointResponse) ProtoMessage() {} +func (*RegisterTracepointResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{17} } -func (m *GetScriptsResponse) XXX_Unmarshal(b []byte) error { +func (m *RegisterTracepointResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetScriptsResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1258,40 +1166,51 @@ func (m *GetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *GetScriptsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetScriptsResponse.Merge(m, src) +func (m *RegisterTracepointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointResponse.Merge(m, src) } -func (m *GetScriptsResponse) XXX_Size() int { +func (m *RegisterTracepointResponse) XXX_Size() int { return m.Size() } -func (m *GetScriptsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetScriptsResponse.DiscardUnknown(m) +func (m *RegisterTracepointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetScriptsResponse proto.InternalMessageInfo +var xxx_messageInfo_RegisterTracepointResponse proto.InternalMessageInfo -func (m *GetScriptsResponse) GetScripts() map[string]*cvmsgspb.CronScript { +func (m *RegisterTracepointResponse) GetTracepoints() []*RegisterTracepointResponse_TracepointStatus { if m != nil { - return m.Scripts + return m.Tracepoints } return nil } -type 
AddOrUpdateScriptRequest struct { - Script *cvmsgspb.CronScript `protobuf:"bytes,1,opt,name=script,proto3" json:"script,omitempty"` +func (m *RegisterTracepointResponse) GetStatus() *statuspb.Status { + if m != nil { + return m.Status + } + return nil } -func (m *AddOrUpdateScriptRequest) Reset() { *m = AddOrUpdateScriptRequest{} } -func (*AddOrUpdateScriptRequest) ProtoMessage() {} -func (*AddOrUpdateScriptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{20} +type RegisterTracepointResponse_TracepointStatus struct { + Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ID *uuidpb.UUID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } -func (m *AddOrUpdateScriptRequest) XXX_Unmarshal(b []byte) error { + +func (m *RegisterTracepointResponse_TracepointStatus) Reset() { + *m = RegisterTracepointResponse_TracepointStatus{} +} +func (*RegisterTracepointResponse_TracepointStatus) ProtoMessage() {} +func (*RegisterTracepointResponse_TracepointStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{17, 0} +} +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AddOrUpdateScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_AddOrUpdateScriptRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1301,39 +1220,54 @@ func (m *AddOrUpdateScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *AddOrUpdateScriptRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_AddOrUpdateScriptRequest.Merge(m, src) +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Merge(m, src) } -func (m *AddOrUpdateScriptRequest) XXX_Size() int { +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Size() int { return m.Size() } -func (m *AddOrUpdateScriptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddOrUpdateScriptRequest.DiscardUnknown(m) +func (m *RegisterTracepointResponse_TracepointStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.DiscardUnknown(m) } -var xxx_messageInfo_AddOrUpdateScriptRequest proto.InternalMessageInfo +var xxx_messageInfo_RegisterTracepointResponse_TracepointStatus proto.InternalMessageInfo -func (m *AddOrUpdateScriptRequest) GetScript() *cvmsgspb.CronScript { +func (m *RegisterTracepointResponse_TracepointStatus) GetStatus() *statuspb.Status { if m != nil { - return m.Script + return m.Status } return nil } -type AddOrUpdateScriptResponse struct { +func (m *RegisterTracepointResponse_TracepointStatus) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil } -func (m *AddOrUpdateScriptResponse) Reset() { *m = AddOrUpdateScriptResponse{} } -func (*AddOrUpdateScriptResponse) ProtoMessage() {} -func (*AddOrUpdateScriptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{21} +func (m *RegisterTracepointResponse_TracepointStatus) GetName() string { + if m != nil { + return m.Name + } + return "" } -func (m *AddOrUpdateScriptResponse) XXX_Unmarshal(b []byte) error { + +type GetTracepointInfoRequest struct { + IDs []*uuidpb.UUID `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (m *GetTracepointInfoRequest) Reset() { *m = GetTracepointInfoRequest{} } +func (*GetTracepointInfoRequest) ProtoMessage() {} +func (*GetTracepointInfoRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_bfe4468195647430, []int{18} +} +func (m *GetTracepointInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AddOrUpdateScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetTracepointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_AddOrUpdateScriptResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetTracepointInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1343,33 +1277,40 @@ func (m *AddOrUpdateScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *AddOrUpdateScriptResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddOrUpdateScriptResponse.Merge(m, src) +func (m *GetTracepointInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTracepointInfoRequest.Merge(m, src) } -func (m *AddOrUpdateScriptResponse) XXX_Size() int { +func (m *GetTracepointInfoRequest) XXX_Size() int { return m.Size() } -func (m *AddOrUpdateScriptResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AddOrUpdateScriptResponse.DiscardUnknown(m) +func (m *GetTracepointInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTracepointInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_AddOrUpdateScriptResponse proto.InternalMessageInfo +var xxx_messageInfo_GetTracepointInfoRequest proto.InternalMessageInfo -type DeleteScriptRequest struct { - ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` +func (m *GetTracepointInfoRequest) GetIDs() []*uuidpb.UUID { + if m != nil { + return m.IDs + } + return nil } -func (m *DeleteScriptRequest) Reset() { *m = DeleteScriptRequest{} } -func (*DeleteScriptRequest) ProtoMessage() {} -func (*DeleteScriptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{22} +type GetTracepointInfoResponse struct { + 
Tracepoints []*GetTracepointInfoResponse_TracepointState `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` } -func (m *DeleteScriptRequest) XXX_Unmarshal(b []byte) error { + +func (m *GetTracepointInfoResponse) Reset() { *m = GetTracepointInfoResponse{} } +func (*GetTracepointInfoResponse) ProtoMessage() {} +func (*GetTracepointInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{19} +} +func (m *GetTracepointInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DeleteScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetTracepointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_DeleteScriptRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_GetTracepointInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1379,39 +1320,47 @@ func (m *DeleteScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *DeleteScriptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteScriptRequest.Merge(m, src) +func (m *GetTracepointInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTracepointInfoResponse.Merge(m, src) } -func (m *DeleteScriptRequest) XXX_Size() int { +func (m *GetTracepointInfoResponse) XXX_Size() int { return m.Size() } -func (m *DeleteScriptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteScriptRequest.DiscardUnknown(m) +func (m *GetTracepointInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTracepointInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_DeleteScriptRequest proto.InternalMessageInfo +var xxx_messageInfo_GetTracepointInfoResponse proto.InternalMessageInfo -func (m *DeleteScriptRequest) GetScriptID() *uuidpb.UUID { +func (m *GetTracepointInfoResponse) GetTracepoints() []*GetTracepointInfoResponse_TracepointState { if 
m != nil { - return m.ScriptID + return m.Tracepoints } return nil } -type DeleteScriptResponse struct { +type GetTracepointInfoResponse_TracepointState struct { + ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + State statuspb.LifeCycleState `protobuf:"varint,2,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` + Statuses []*statuspb.Status `protobuf:"bytes,3,rep,name=statuses,proto3" json:"statuses,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + ExpectedState statuspb.LifeCycleState `protobuf:"varint,5,opt,name=expected_state,json=expectedState,proto3,enum=px.statuspb.LifeCycleState" json:"expected_state,omitempty"` + SchemaNames []string `protobuf:"bytes,6,rep,name=schema_names,json=schemaNames,proto3" json:"schema_names,omitempty"` } -func (m *DeleteScriptResponse) Reset() { *m = DeleteScriptResponse{} } -func (*DeleteScriptResponse) ProtoMessage() {} -func (*DeleteScriptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{23} +func (m *GetTracepointInfoResponse_TracepointState) Reset() { + *m = GetTracepointInfoResponse_TracepointState{} } -func (m *DeleteScriptResponse) XXX_Unmarshal(b []byte) error { +func (*GetTracepointInfoResponse_TracepointState) ProtoMessage() {} +func (*GetTracepointInfoResponse_TracepointState) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{19, 0} +} +func (m *GetTracepointInfoResponse_TracepointState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DeleteScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetTracepointInfoResponse_TracepointState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_DeleteScriptResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Marshal(b, m, deterministic) } else { b = b[:cap(b)] 
n, err := m.MarshalToSizedBuffer(b) @@ -1421,33 +1370,75 @@ func (m *DeleteScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *DeleteScriptResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteScriptResponse.Merge(m, src) +func (m *GetTracepointInfoResponse_TracepointState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Merge(m, src) } -func (m *DeleteScriptResponse) XXX_Size() int { +func (m *GetTracepointInfoResponse_TracepointState) XXX_Size() int { return m.Size() } -func (m *DeleteScriptResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteScriptResponse.DiscardUnknown(m) +func (m *GetTracepointInfoResponse_TracepointState) XXX_DiscardUnknown() { + xxx_messageInfo_GetTracepointInfoResponse_TracepointState.DiscardUnknown(m) } -var xxx_messageInfo_DeleteScriptResponse proto.InternalMessageInfo +var xxx_messageInfo_GetTracepointInfoResponse_TracepointState proto.InternalMessageInfo -type SetScriptsRequest struct { - Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} +func (m *GetTracepointInfoResponse_TracepointState) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil +} -func (m *SetScriptsRequest) Reset() { *m = SetScriptsRequest{} } -func (*SetScriptsRequest) ProtoMessage() {} -func (*SetScriptsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{24} +func (m *GetTracepointInfoResponse_TracepointState) GetState() statuspb.LifeCycleState { + if m != nil { + return m.State + } + return statuspb.UNKNOWN_STATE } -func (m *SetScriptsRequest) XXX_Unmarshal(b []byte) error { + +func (m *GetTracepointInfoResponse_TracepointState) GetStatuses() []*statuspb.Status { + if m != nil { + return m.Statuses + } + return nil +} + +func (m 
*GetTracepointInfoResponse_TracepointState) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetTracepointInfoResponse_TracepointState) GetExpectedState() statuspb.LifeCycleState { + if m != nil { + return m.ExpectedState + } + return statuspb.UNKNOWN_STATE +} + +func (m *GetTracepointInfoResponse_TracepointState) GetSchemaNames() []string { + if m != nil { + return m.SchemaNames + } + return nil +} + +type RemoveTracepointRequest struct { + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` +} + +func (m *RemoveTracepointRequest) Reset() { *m = RemoveTracepointRequest{} } +func (*RemoveTracepointRequest) ProtoMessage() {} +func (*RemoveTracepointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{20} +} +func (m *RemoveTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RemoveTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_SetScriptsRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RemoveTracepointRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1457,39 +1448,40 @@ func (m *SetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *SetScriptsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetScriptsRequest.Merge(m, src) +func (m *RemoveTracepointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveTracepointRequest.Merge(m, src) } -func (m *SetScriptsRequest) XXX_Size() int { +func (m *RemoveTracepointRequest) XXX_Size() int { return m.Size() } -func (m *SetScriptsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetScriptsRequest.DiscardUnknown(m) +func (m *RemoveTracepointRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_RemoveTracepointRequest.DiscardUnknown(m) } -var xxx_messageInfo_SetScriptsRequest proto.InternalMessageInfo +var xxx_messageInfo_RemoveTracepointRequest proto.InternalMessageInfo -func (m *SetScriptsRequest) GetScripts() map[string]*cvmsgspb.CronScript { +func (m *RemoveTracepointRequest) GetNames() []string { if m != nil { - return m.Scripts + return m.Names } return nil } -type SetScriptsResponse struct { +type RemoveTracepointResponse struct { + Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (m *SetScriptsResponse) Reset() { *m = SetScriptsResponse{} } -func (*SetScriptsResponse) ProtoMessage() {} -func (*SetScriptsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{25} +func (m *RemoveTracepointResponse) Reset() { *m = RemoveTracepointResponse{} } +func (*RemoveTracepointResponse) ProtoMessage() {} +func (*RemoveTracepointResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{21} } -func (m *SetScriptsResponse) XXX_Unmarshal(b []byte) error { +func (m *RemoveTracepointResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RemoveTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_SetScriptsResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RemoveTracepointResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1499,36 +1491,42 @@ func (m *SetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *SetScriptsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetScriptsResponse.Merge(m, src) +func (m *RemoveTracepointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveTracepointResponse.Merge(m, src) } -func (m 
*SetScriptsResponse) XXX_Size() int { +func (m *RemoveTracepointResponse) XXX_Size() int { return m.Size() } -func (m *SetScriptsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetScriptsResponse.DiscardUnknown(m) +func (m *RemoveTracepointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveTracepointResponse.DiscardUnknown(m) } -var xxx_messageInfo_SetScriptsResponse proto.InternalMessageInfo +var xxx_messageInfo_RemoveTracepointResponse proto.InternalMessageInfo -type ExecutionStats struct { - ExecutionTimeNs int64 `protobuf:"varint,1,opt,name=execution_time_ns,json=executionTimeNs,proto3" json:"execution_time_ns,omitempty"` - CompilationTimeNs int64 `protobuf:"varint,2,opt,name=compilation_time_ns,json=compilationTimeNs,proto3" json:"compilation_time_ns,omitempty"` - BytesProcessed int64 `protobuf:"varint,3,opt,name=bytes_processed,json=bytesProcessed,proto3" json:"bytes_processed,omitempty"` - RecordsProcessed int64 `protobuf:"varint,4,opt,name=records_processed,json=recordsProcessed,proto3" json:"records_processed,omitempty"` +func (m *RemoveTracepointResponse) GetStatus() *statuspb.Status { + if m != nil { + return m.Status + } + return nil } -func (m *ExecutionStats) Reset() { *m = ExecutionStats{} } -func (*ExecutionStats) ProtoMessage() {} -func (*ExecutionStats) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{26} +type UpdateConfigRequest struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + AgentPodName string `protobuf:"bytes,3,opt,name=agent_pod_name,json=agentPodName,proto3" json:"agent_pod_name,omitempty"` } -func (m *ExecutionStats) XXX_Unmarshal(b []byte) error { + +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{22} +} +func 
(m *UpdateConfigRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ExecutionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *UpdateConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ExecutionStats.Marshal(b, m, deterministic) + return xxx_messageInfo_UpdateConfigRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1538,66 +1536,54 @@ func (m *ExecutionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *ExecutionStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecutionStats.Merge(m, src) +func (m *UpdateConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateConfigRequest.Merge(m, src) } -func (m *ExecutionStats) XXX_Size() int { +func (m *UpdateConfigRequest) XXX_Size() int { return m.Size() } -func (m *ExecutionStats) XXX_DiscardUnknown() { - xxx_messageInfo_ExecutionStats.DiscardUnknown(m) +func (m *UpdateConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateConfigRequest.DiscardUnknown(m) } -var xxx_messageInfo_ExecutionStats proto.InternalMessageInfo - -func (m *ExecutionStats) GetExecutionTimeNs() int64 { - if m != nil { - return m.ExecutionTimeNs - } - return 0 -} +var xxx_messageInfo_UpdateConfigRequest proto.InternalMessageInfo -func (m *ExecutionStats) GetCompilationTimeNs() int64 { +func (m *UpdateConfigRequest) GetKey() string { if m != nil { - return m.CompilationTimeNs + return m.Key } - return 0 + return "" } -func (m *ExecutionStats) GetBytesProcessed() int64 { +func (m *UpdateConfigRequest) GetValue() string { if m != nil { - return m.BytesProcessed + return m.Value } - return 0 + return "" } -func (m *ExecutionStats) GetRecordsProcessed() int64 { +func (m *UpdateConfigRequest) GetAgentPodName() string { if m != nil { - return m.RecordsProcessed + return m.AgentPodName } - return 0 + return "" } -type 
RecordExecutionResultRequest struct { - ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` - Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Types that are valid to be assigned to Result: - // *RecordExecutionResultRequest_Error - // *RecordExecutionResultRequest_ExecutionStats - Result isRecordExecutionResultRequest_Result `protobuf_oneof:"result"` +type UpdateConfigResponse struct { + Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (m *RecordExecutionResultRequest) Reset() { *m = RecordExecutionResultRequest{} } -func (*RecordExecutionResultRequest) ProtoMessage() {} -func (*RecordExecutionResultRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{27} +func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } +func (*UpdateConfigResponse) ProtoMessage() {} +func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{23} } -func (m *RecordExecutionResultRequest) XXX_Unmarshal(b []byte) error { +func (m *UpdateConfigResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RecordExecutionResultRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *UpdateConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RecordExecutionResultRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_UpdateConfigResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1607,92 +1593,118 @@ func (m *RecordExecutionResultRequest) XXX_Marshal(b []byte, deterministic bool) return b[:n], nil } } -func (m *RecordExecutionResultRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RecordExecutionResultRequest.Merge(m, src) +func (m *UpdateConfigResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_UpdateConfigResponse.Merge(m, src) } -func (m *RecordExecutionResultRequest) XXX_Size() int { +func (m *UpdateConfigResponse) XXX_Size() int { return m.Size() } -func (m *RecordExecutionResultRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RecordExecutionResultRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RecordExecutionResultRequest proto.InternalMessageInfo - -type isRecordExecutionResultRequest_Result interface { - isRecordExecutionResultRequest_Result() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} - -type RecordExecutionResultRequest_Error struct { - Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` -} -type RecordExecutionResultRequest_ExecutionStats struct { - ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` +func (m *UpdateConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateConfigResponse.DiscardUnknown(m) } -func (*RecordExecutionResultRequest_Error) isRecordExecutionResultRequest_Result() {} -func (*RecordExecutionResultRequest_ExecutionStats) isRecordExecutionResultRequest_Result() {} +var xxx_messageInfo_UpdateConfigResponse proto.InternalMessageInfo -func (m *RecordExecutionResultRequest) GetResult() isRecordExecutionResultRequest_Result { +func (m *UpdateConfigResponse) GetStatus() *statuspb.Status { if m != nil { - return m.Result + return m.Status } return nil } -func (m *RecordExecutionResultRequest) GetScriptID() *uuidpb.UUID { - if m != nil { - return m.ScriptID - } - return nil +type GetScriptsRequest struct { } -func (m *RecordExecutionResultRequest) GetTimestamp() *types.Timestamp { - if m != nil { - return m.Timestamp - } - return nil +func (m *GetScriptsRequest) Reset() { *m = GetScriptsRequest{} } +func (*GetScriptsRequest) ProtoMessage() {} +func (*GetScriptsRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_bfe4468195647430, []int{24} } - -func (m *RecordExecutionResultRequest) GetError() *statuspb.Status { - if x, ok := m.GetResult().(*RecordExecutionResultRequest_Error); ok { - return x.Error - } - return nil +func (m *GetScriptsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *RecordExecutionResultRequest) GetExecutionStats() *ExecutionStats { - if x, ok := m.GetResult().(*RecordExecutionResultRequest_ExecutionStats); ok { - return x.ExecutionStats - } - return nil +func (m *GetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetScriptsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetScriptsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetScriptsRequest.Merge(m, src) +} +func (m *GetScriptsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetScriptsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetScriptsRequest.DiscardUnknown(m) } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*RecordExecutionResultRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*RecordExecutionResultRequest_Error)(nil), - (*RecordExecutionResultRequest_ExecutionStats)(nil), +var xxx_messageInfo_GetScriptsRequest proto.InternalMessageInfo + +type GetScriptsResponse struct { + Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *GetScriptsResponse) Reset() { *m = GetScriptsResponse{} } +func (*GetScriptsResponse) ProtoMessage() {} +func (*GetScriptsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{25} +} +func (m *GetScriptsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetScriptsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } } +func (m *GetScriptsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetScriptsResponse.Merge(m, src) +} +func (m *GetScriptsResponse) XXX_Size() int { + return m.Size() +} +func (m *GetScriptsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetScriptsResponse.DiscardUnknown(m) +} -type RecordExecutionResultResponse struct { +var xxx_messageInfo_GetScriptsResponse proto.InternalMessageInfo + +func (m *GetScriptsResponse) GetScripts() map[string]*cvmsgspb.CronScript { + if m != nil { + return m.Scripts + } + return nil } -func (m *RecordExecutionResultResponse) Reset() { *m = RecordExecutionResultResponse{} } -func (*RecordExecutionResultResponse) ProtoMessage() {} -func (*RecordExecutionResultResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{28} +type AddOrUpdateScriptRequest struct { + Script 
*cvmsgspb.CronScript `protobuf:"bytes,1,opt,name=script,proto3" json:"script,omitempty"` } -func (m *RecordExecutionResultResponse) XXX_Unmarshal(b []byte) error { + +func (m *AddOrUpdateScriptRequest) Reset() { *m = AddOrUpdateScriptRequest{} } +func (*AddOrUpdateScriptRequest) ProtoMessage() {} +func (*AddOrUpdateScriptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{26} +} +func (m *AddOrUpdateScriptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RecordExecutionResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *AddOrUpdateScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RecordExecutionResultResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_AddOrUpdateScriptRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1702,32 +1714,39 @@ func (m *RecordExecutionResultResponse) XXX_Marshal(b []byte, deterministic bool return b[:n], nil } } -func (m *RecordExecutionResultResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RecordExecutionResultResponse.Merge(m, src) +func (m *AddOrUpdateScriptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddOrUpdateScriptRequest.Merge(m, src) } -func (m *RecordExecutionResultResponse) XXX_Size() int { +func (m *AddOrUpdateScriptRequest) XXX_Size() int { return m.Size() } -func (m *RecordExecutionResultResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RecordExecutionResultResponse.DiscardUnknown(m) +func (m *AddOrUpdateScriptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddOrUpdateScriptRequest.DiscardUnknown(m) } -var xxx_messageInfo_RecordExecutionResultResponse proto.InternalMessageInfo +var xxx_messageInfo_AddOrUpdateScriptRequest proto.InternalMessageInfo -type GetAllExecutionResultsRequest struct { +func (m *AddOrUpdateScriptRequest) GetScript() *cvmsgspb.CronScript { + if m != nil { + 
return m.Script + } + return nil } -func (m *GetAllExecutionResultsRequest) Reset() { *m = GetAllExecutionResultsRequest{} } -func (*GetAllExecutionResultsRequest) ProtoMessage() {} -func (*GetAllExecutionResultsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{29} +type AddOrUpdateScriptResponse struct { } -func (m *GetAllExecutionResultsRequest) XXX_Unmarshal(b []byte) error { + +func (m *AddOrUpdateScriptResponse) Reset() { *m = AddOrUpdateScriptResponse{} } +func (*AddOrUpdateScriptResponse) ProtoMessage() {} +func (*AddOrUpdateScriptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{27} +} +func (m *AddOrUpdateScriptResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetAllExecutionResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *AddOrUpdateScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetAllExecutionResultsRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_AddOrUpdateScriptResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1737,33 +1756,33 @@ func (m *GetAllExecutionResultsRequest) XXX_Marshal(b []byte, deterministic bool return b[:n], nil } } -func (m *GetAllExecutionResultsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllExecutionResultsRequest.Merge(m, src) +func (m *AddOrUpdateScriptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddOrUpdateScriptResponse.Merge(m, src) } -func (m *GetAllExecutionResultsRequest) XXX_Size() int { +func (m *AddOrUpdateScriptResponse) XXX_Size() int { return m.Size() } -func (m *GetAllExecutionResultsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllExecutionResultsRequest.DiscardUnknown(m) +func (m *AddOrUpdateScriptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddOrUpdateScriptResponse.DiscardUnknown(m) } -var 
xxx_messageInfo_GetAllExecutionResultsRequest proto.InternalMessageInfo +var xxx_messageInfo_AddOrUpdateScriptResponse proto.InternalMessageInfo -type GetAllExecutionResultsResponse struct { - Results []*GetAllExecutionResultsResponse_ExecutionResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +type DeleteScriptRequest struct { + ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` } -func (m *GetAllExecutionResultsResponse) Reset() { *m = GetAllExecutionResultsResponse{} } -func (*GetAllExecutionResultsResponse) ProtoMessage() {} -func (*GetAllExecutionResultsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{30} +func (m *DeleteScriptRequest) Reset() { *m = DeleteScriptRequest{} } +func (*DeleteScriptRequest) ProtoMessage() {} +func (*DeleteScriptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{28} } -func (m *GetAllExecutionResultsResponse) XXX_Unmarshal(b []byte) error { +func (m *DeleteScriptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetAllExecutionResultsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeleteScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetAllExecutionResultsResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_DeleteScriptRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1773,47 +1792,39 @@ func (m *GetAllExecutionResultsResponse) XXX_Marshal(b []byte, deterministic boo return b[:n], nil } } -func (m *GetAllExecutionResultsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllExecutionResultsResponse.Merge(m, src) +func (m *DeleteScriptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteScriptRequest.Merge(m, src) } -func (m *GetAllExecutionResultsResponse) XXX_Size() 
int { +func (m *DeleteScriptRequest) XXX_Size() int { return m.Size() } -func (m *GetAllExecutionResultsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllExecutionResultsResponse.DiscardUnknown(m) +func (m *DeleteScriptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteScriptRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetAllExecutionResultsResponse proto.InternalMessageInfo +var xxx_messageInfo_DeleteScriptRequest proto.InternalMessageInfo -func (m *GetAllExecutionResultsResponse) GetResults() []*GetAllExecutionResultsResponse_ExecutionResult { +func (m *DeleteScriptRequest) GetScriptID() *uuidpb.UUID { if m != nil { - return m.Results + return m.ScriptID } return nil } -type GetAllExecutionResultsResponse_ExecutionResult struct { - ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` - Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Types that are valid to be assigned to Result: - // *GetAllExecutionResultsResponse_ExecutionResult_Error - // *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats - Result isGetAllExecutionResultsResponse_ExecutionResult_Result `protobuf_oneof:"result"` +type DeleteScriptResponse struct { } -func (m *GetAllExecutionResultsResponse_ExecutionResult) Reset() { - *m = GetAllExecutionResultsResponse_ExecutionResult{} -} -func (*GetAllExecutionResultsResponse_ExecutionResult) ProtoMessage() {} -func (*GetAllExecutionResultsResponse_ExecutionResult) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{30, 0} +func (m *DeleteScriptResponse) Reset() { *m = DeleteScriptResponse{} } +func (*DeleteScriptResponse) ProtoMessage() {} +func (*DeleteScriptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{29} } -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Unmarshal(b []byte) error { +func (m *DeleteScriptResponse) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) } -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeleteScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Marshal(b, m, deterministic) + return xxx_messageInfo_DeleteScriptResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1823,497 +1834,685 @@ func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Marshal(b []byte, d return b[:n], nil } } -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Merge(m, src) +func (m *DeleteScriptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteScriptResponse.Merge(m, src) } -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Size() int { +func (m *DeleteScriptResponse) XXX_Size() int { return m.Size() } -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.DiscardUnknown(m) +func (m *DeleteScriptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteScriptResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult proto.InternalMessageInfo +var xxx_messageInfo_DeleteScriptResponse proto.InternalMessageInfo -type isGetAllExecutionResultsResponse_ExecutionResult_Result interface { - isGetAllExecutionResultsResponse_ExecutionResult_Result() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int +type SetScriptsRequest struct { + Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -type 
GetAllExecutionResultsResponse_ExecutionResult_Error struct { - Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` +func (m *SetScriptsRequest) Reset() { *m = SetScriptsRequest{} } +func (*SetScriptsRequest) ProtoMessage() {} +func (*SetScriptsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{30} } -type GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats struct { - ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` +func (m *SetScriptsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (*GetAllExecutionResultsResponse_ExecutionResult_Error) isGetAllExecutionResultsResponse_ExecutionResult_Result() { +func (m *SetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SetScriptsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) isGetAllExecutionResultsResponse_ExecutionResult_Result() { +func (m *SetScriptsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetScriptsRequest.Merge(m, src) } - -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetResult() isGetAllExecutionResultsResponse_ExecutionResult_Result { - if m != nil { - return m.Result - } - return nil +func (m *SetScriptsRequest) XXX_Size() int { + return m.Size() +} +func (m *SetScriptsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetScriptsRequest.DiscardUnknown(m) } -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetScriptID() *uuidpb.UUID { +var xxx_messageInfo_SetScriptsRequest proto.InternalMessageInfo + +func (m *SetScriptsRequest) GetScripts() map[string]*cvmsgspb.CronScript { if m != nil { - return m.ScriptID + 
return m.Scripts } return nil } -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetTimestamp() *types.Timestamp { - if m != nil { - return m.Timestamp - } - return nil +type SetScriptsResponse struct { } -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetError() *statuspb.Status { - if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_Error); ok { - return x.Error +func (m *SetScriptsResponse) Reset() { *m = SetScriptsResponse{} } +func (*SetScriptsResponse) ProtoMessage() {} +func (*SetScriptsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{31} +} +func (m *SetScriptsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SetScriptsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return nil +} +func (m *SetScriptsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetScriptsResponse.Merge(m, src) +} +func (m *SetScriptsResponse) XXX_Size() int { + return m.Size() +} +func (m *SetScriptsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetScriptsResponse.DiscardUnknown(m) } -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetExecutionStats() *ExecutionStats { - if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats); ok { - return x.ExecutionStats - } - return nil +var xxx_messageInfo_SetScriptsResponse proto.InternalMessageInfo + +type ExecutionStats struct { + ExecutionTimeNs int64 `protobuf:"varint,1,opt,name=execution_time_ns,json=executionTimeNs,proto3" json:"execution_time_ns,omitempty"` + CompilationTimeNs int64 `protobuf:"varint,2,opt,name=compilation_time_ns,json=compilationTimeNs,proto3" json:"compilation_time_ns,omitempty"` + BytesProcessed int64 
`protobuf:"varint,3,opt,name=bytes_processed,json=bytesProcessed,proto3" json:"bytes_processed,omitempty"` + RecordsProcessed int64 `protobuf:"varint,4,opt,name=records_processed,json=recordsProcessed,proto3" json:"records_processed,omitempty"` } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*GetAllExecutionResultsResponse_ExecutionResult) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*GetAllExecutionResultsResponse_ExecutionResult_Error)(nil), - (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats)(nil), +func (m *ExecutionStats) Reset() { *m = ExecutionStats{} } +func (*ExecutionStats) ProtoMessage() {} +func (*ExecutionStats) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{32} +} +func (m *ExecutionStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecutionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecutionStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } } +func (m *ExecutionStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecutionStats.Merge(m, src) +} +func (m *ExecutionStats) XXX_Size() int { + return m.Size() +} +func (m *ExecutionStats) XXX_DiscardUnknown() { + xxx_messageInfo_ExecutionStats.DiscardUnknown(m) +} -func init() { - proto.RegisterType((*SchemaRequest)(nil), "px.vizier.services.metadata.SchemaRequest") - proto.RegisterType((*SchemaResponse)(nil), "px.vizier.services.metadata.SchemaResponse") - proto.RegisterType((*AgentInfoRequest)(nil), "px.vizier.services.metadata.AgentInfoRequest") - proto.RegisterType((*AgentInfoResponse)(nil), "px.vizier.services.metadata.AgentInfoResponse") - proto.RegisterType((*AgentMetadata)(nil), "px.vizier.services.metadata.AgentMetadata") - proto.RegisterType((*AgentUpdatesRequest)(nil), 
"px.vizier.services.metadata.AgentUpdatesRequest") - proto.RegisterType((*AgentUpdate)(nil), "px.vizier.services.metadata.AgentUpdate") - proto.RegisterType((*AgentUpdatesResponse)(nil), "px.vizier.services.metadata.AgentUpdatesResponse") - proto.RegisterType((*WithPrefixKeyRequest)(nil), "px.vizier.services.metadata.WithPrefixKeyRequest") - proto.RegisterType((*WithPrefixKeyResponse)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse") - proto.RegisterType((*WithPrefixKeyResponse_KV)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse.KV") - proto.RegisterType((*RegisterTracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest") - proto.RegisterType((*RegisterTracepointRequest_TracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest.TracepointRequest") - proto.RegisterType((*RegisterTracepointResponse)(nil), "px.vizier.services.metadata.RegisterTracepointResponse") - proto.RegisterType((*RegisterTracepointResponse_TracepointStatus)(nil), "px.vizier.services.metadata.RegisterTracepointResponse.TracepointStatus") - proto.RegisterType((*GetTracepointInfoRequest)(nil), "px.vizier.services.metadata.GetTracepointInfoRequest") - proto.RegisterType((*GetTracepointInfoResponse)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse") - proto.RegisterType((*GetTracepointInfoResponse_TracepointState)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse.TracepointState") - proto.RegisterType((*RemoveTracepointRequest)(nil), "px.vizier.services.metadata.RemoveTracepointRequest") - proto.RegisterType((*RemoveTracepointResponse)(nil), "px.vizier.services.metadata.RemoveTracepointResponse") - proto.RegisterType((*UpdateConfigRequest)(nil), "px.vizier.services.metadata.UpdateConfigRequest") - proto.RegisterType((*UpdateConfigResponse)(nil), "px.vizier.services.metadata.UpdateConfigResponse") - proto.RegisterType((*GetScriptsRequest)(nil), "px.vizier.services.metadata.GetScriptsRequest") - 
proto.RegisterType((*GetScriptsResponse)(nil), "px.vizier.services.metadata.GetScriptsResponse") - proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), "px.vizier.services.metadata.GetScriptsResponse.ScriptsEntry") - proto.RegisterType((*AddOrUpdateScriptRequest)(nil), "px.vizier.services.metadata.AddOrUpdateScriptRequest") - proto.RegisterType((*AddOrUpdateScriptResponse)(nil), "px.vizier.services.metadata.AddOrUpdateScriptResponse") - proto.RegisterType((*DeleteScriptRequest)(nil), "px.vizier.services.metadata.DeleteScriptRequest") - proto.RegisterType((*DeleteScriptResponse)(nil), "px.vizier.services.metadata.DeleteScriptResponse") - proto.RegisterType((*SetScriptsRequest)(nil), "px.vizier.services.metadata.SetScriptsRequest") - proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), "px.vizier.services.metadata.SetScriptsRequest.ScriptsEntry") - proto.RegisterType((*SetScriptsResponse)(nil), "px.vizier.services.metadata.SetScriptsResponse") - proto.RegisterType((*ExecutionStats)(nil), "px.vizier.services.metadata.ExecutionStats") - proto.RegisterType((*RecordExecutionResultRequest)(nil), "px.vizier.services.metadata.RecordExecutionResultRequest") - proto.RegisterType((*RecordExecutionResultResponse)(nil), "px.vizier.services.metadata.RecordExecutionResultResponse") - proto.RegisterType((*GetAllExecutionResultsRequest)(nil), "px.vizier.services.metadata.GetAllExecutionResultsRequest") - proto.RegisterType((*GetAllExecutionResultsResponse)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse") - proto.RegisterType((*GetAllExecutionResultsResponse_ExecutionResult)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse.ExecutionResult") +var xxx_messageInfo_ExecutionStats proto.InternalMessageInfo + +func (m *ExecutionStats) GetExecutionTimeNs() int64 { + if m != nil { + return m.ExecutionTimeNs + } + return 0 } -func init() { - proto.RegisterFile("src/vizier/services/metadata/metadatapb/service.proto", 
fileDescriptor_bfe4468195647430) +func (m *ExecutionStats) GetCompilationTimeNs() int64 { + if m != nil { + return m.CompilationTimeNs + } + return 0 } -var fileDescriptor_bfe4468195647430 = []byte{ - // 2017 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x23, 0x57, - 0x15, 0xf7, 0x78, 0xf2, 0xe1, 0x9c, 0x64, 0xf3, 0x71, 0xe3, 0x6c, 0x1d, 0x2f, 0x75, 0xb6, 0x23, - 0xa0, 0xab, 0x4d, 0x77, 0xa6, 0x6b, 0xba, 0x4d, 0xd9, 0x96, 0xaa, 0x9b, 0xb8, 0x4d, 0xac, 0x6c, - 0xdb, 0x30, 0xce, 0x06, 0x89, 0x17, 0x6b, 0x3c, 0x73, 0xe3, 0x1d, 0xea, 0xf9, 0x60, 0xe6, 0x3a, - 0x24, 0x08, 0x09, 0x84, 0xc4, 0x1b, 0xaa, 0xe8, 0x03, 0x48, 0x7d, 0x03, 0xf1, 0x02, 0xcf, 0xfc, - 0x01, 0x08, 0x9e, 0x78, 0xdc, 0x27, 0x54, 0x21, 0xb4, 0x22, 0xde, 0x17, 0x9e, 0x50, 0xff, 0x04, - 0x74, 0xbf, 0xec, 0xb1, 0x3d, 0xb6, 0xe3, 0x80, 0x78, 0xe2, 0x29, 0x77, 0xce, 0x9c, 0xf3, 0xbb, - 0xe7, 0xfe, 0xce, 0xb9, 0xf7, 0xfe, 0xc6, 0x81, 0x07, 0x71, 0x64, 0x1b, 0x67, 0xee, 0x0f, 0x5d, - 0x1c, 0x19, 0x31, 0x8e, 0xce, 0x5c, 0x1b, 0xc7, 0x86, 0x87, 0x89, 0xe5, 0x58, 0xc4, 0xea, 0x0e, - 0xc2, 0x86, 0x7c, 0xa9, 0x87, 0x51, 0x40, 0x02, 0x74, 0x2b, 0x3c, 0xd7, 0x79, 0x94, 0x2e, 0xa3, - 0x74, 0xe9, 0x5c, 0xcc, 0x37, 0x83, 0x66, 0xc0, 0xfc, 0x0c, 0x3a, 0xe2, 0x21, 0xc5, 0x52, 0x33, - 0x08, 0x9a, 0x2d, 0x6c, 0xb0, 0xa7, 0x46, 0xfb, 0xd4, 0x70, 0xda, 0x91, 0x45, 0xdc, 0xc0, 0x17, - 0xef, 0xb7, 0x06, 0xdf, 0x13, 0xd7, 0xc3, 0x31, 0xb1, 0xbc, 0x50, 0x3a, 0xd0, 0x54, 0xad, 0xd0, - 0xe5, 0x1e, 0x46, 0xbb, 0xed, 0x3a, 0x61, 0x83, 0xfd, 0x11, 0x0e, 0x3b, 0xd4, 0xc1, 0xb6, 0x22, - 0x3f, 0x20, 0x46, 0xd8, 0xb2, 0x7c, 0x1f, 0x47, 0x86, 0xe3, 0xc6, 0x24, 0x72, 0x1b, 0x6d, 0x82, - 0xa9, 0x73, 0xe2, 0xa9, 0x4e, 0x3d, 0x44, 0xe0, 0xb7, 0xd2, 0x02, 0x2f, 0x7c, 0xcb, 0x73, 0xed, - 0x3a, 0x89, 0x2c, 0xdb, 0xf5, 0x9b, 0x86, 0x1b, 0x19, 0xad, 0xa0, 0xe9, 0xda, 0x56, 0x2b, 0x6c, - 0xc8, 0x91, 0x08, 0xff, 0x1a, 0x0b, 0x0f, 0x3c, 0x2f, 0xf0, 0x8d, 0x86, 0x15, 0x63, 
0x23, 0x26, - 0x16, 0x69, 0xc7, 0x94, 0x34, 0x36, 0x48, 0xba, 0x11, 0xab, 0xd1, 0xc2, 0xf5, 0x98, 0x04, 0x11, - 0x36, 0x62, 0xfb, 0x29, 0xf6, 0x18, 0xb7, 0x6c, 0x20, 0xdc, 0xee, 0x25, 0x2a, 0xe2, 0xe1, 0x38, - 0xb6, 0x9a, 0xac, 0x22, 0x7c, 0x10, 0x36, 0xba, 0x43, 0xe1, 0xae, 0xa7, 0x15, 0x30, 0x7e, 0x6a, - 0x45, 0xd8, 0x31, 0xac, 0x26, 0xf6, 0x49, 0xd8, 0xe0, 0x7f, 0x85, 0xff, 0x6d, 0xea, 0x2f, 0xde, - 0xdb, 0x67, 0x5e, 0xdc, 0xa4, 0x98, 0x7c, 0xc0, 0x3d, 0xb4, 0x15, 0xb8, 0x51, 0x63, 0x09, 0x99, - 0xf8, 0xfb, 0x6d, 0x1c, 0x13, 0xad, 0x0a, 0xcb, 0xd2, 0x10, 0x87, 0x81, 0x1f, 0x63, 0xb4, 0x03, - 0x73, 0x3c, 0xe7, 0x42, 0xf6, 0xb6, 0x72, 0x67, 0xb1, 0xbc, 0xa5, 0x87, 0xe7, 0x7a, 0x62, 0x69, - 0xba, 0x5c, 0x9a, 0x2e, 0x02, 0x85, 0xbb, 0x86, 0x60, 0xf5, 0x11, 0x4d, 0xa6, 0xea, 0x9f, 0x06, - 0x12, 0xbe, 0x06, 0x6b, 0x09, 0x9b, 0x98, 0xe1, 0x5d, 0x98, 0x71, 0xfd, 0xd3, 0xa0, 0xa0, 0xdc, - 0x56, 0xef, 0x2c, 0x96, 0xef, 0xea, 0x63, 0xfa, 0x4d, 0x67, 0xd1, 0x1f, 0x8a, 0x27, 0x93, 0xc5, - 0x69, 0x97, 0x0a, 0xdc, 0xe8, 0xb3, 0xa3, 0x77, 0x60, 0x96, 0xf1, 0x50, 0x50, 0x58, 0xca, 0x5f, - 0x4f, 0x83, 0xe4, 0xbc, 0xe8, 0x9c, 0x2f, 0x16, 0x6e, 0xf2, 0x20, 0x54, 0x81, 0x39, 0x5e, 0x4c, - 0xb1, 0xe2, 0xd7, 0xae, 0x16, 0x5e, 0x63, 0x31, 0xa6, 0x88, 0x45, 0x8f, 0x61, 0x91, 0xb7, 0x59, - 0x9d, 0x2d, 0x4e, 0x65, 0x50, 0xdb, 0x14, 0x8a, 0x9b, 0x75, 0xd1, 0x7d, 0x7a, 0x5f, 0xdb, 0xea, - 0x7b, 0xec, 0x25, 0xe3, 0x07, 0xec, 0xee, 0x58, 0xfb, 0x5c, 0x81, 0x75, 0x36, 0xcb, 0x93, 0xd0, - 0xb1, 0x08, 0x8e, 0x05, 0xa1, 0xa8, 0x0a, 0xeb, 0x9e, 0x75, 0x5e, 0x6f, 0x33, 0x6b, 0xdd, 0xf5, - 0x09, 0x8e, 0xce, 0xac, 0x96, 0x58, 0xf7, 0xa6, 0xce, 0xf7, 0x99, 0x2e, 0xf7, 0x99, 0x5e, 0x11, - 0xfb, 0xd0, 0x5c, 0xf3, 0xac, 0x73, 0x0e, 0x55, 0x15, 0x31, 0x68, 0x07, 0x0a, 0x3d, 0xa8, 0xb8, - 0x1e, 0xe2, 0xa8, 0x1e, 0x89, 0x12, 0x31, 0x22, 0x66, 0xcd, 0x8d, 0x6e, 0x50, 0x7c, 0x84, 0x23, - 0x59, 0x3f, 0xed, 0x5f, 0x0a, 0x2c, 0x26, 0x72, 0x43, 0x3b, 0x90, 0x63, 0xb4, 0xd4, 0x5d, 0x47, - 0x24, 0xb2, 0x42, 0x97, 
0xcd, 0x37, 0xb1, 0xfe, 0xe4, 0x49, 0xb5, 0xb2, 0xbb, 0xd8, 0x79, 0xbe, - 0x35, 0xcf, 0x3b, 0xa1, 0x62, 0xce, 0x33, 0xef, 0xaa, 0x83, 0x8a, 0x30, 0xef, 0xe0, 0x16, 0x26, - 0xd8, 0x61, 0x13, 0xe6, 0x0e, 0x32, 0xa6, 0x34, 0xa0, 0x77, 0x65, 0x49, 0xd5, 0x69, 0x4a, 0x7a, - 0x90, 0x91, 0x45, 0x7d, 0x0f, 0x16, 0x68, 0x6b, 0xf0, 0x62, 0xcc, 0x30, 0x8c, 0x57, 0x12, 0x18, - 0xdd, 0x9d, 0xc6, 0xc2, 0x2a, 0x16, 0xb1, 0x28, 0xed, 0x07, 0x19, 0x33, 0xe7, 0x88, 0xf1, 0x6e, - 0x0e, 0xe6, 0x38, 0x37, 0xda, 0x67, 0x59, 0xc8, 0xf7, 0x17, 0x43, 0x74, 0xf2, 0x87, 0x70, 0x83, - 0xaf, 0x5c, 0x90, 0x28, 0x5a, 0xfa, 0xce, 0xe4, 0x96, 0xe6, 0x48, 0xe6, 0x92, 0x95, 0x80, 0x45, - 0x47, 0x12, 0x8e, 0xef, 0x28, 0xda, 0x8f, 0xea, 0x95, 0x9a, 0x88, 0xef, 0x44, 0xd6, 0x44, 0x1c, - 0x91, 0x1b, 0x62, 0x54, 0x86, 0x8d, 0x3e, 0x44, 0x91, 0xa8, 0xc3, 0x58, 0xcd, 0x99, 0xeb, 0x49, - 0x67, 0x9e, 0x85, 0x83, 0xbe, 0x0a, 0xcb, 0xd8, 0x77, 0xea, 0xc1, 0x69, 0xfd, 0x0c, 0x47, 0xb1, - 0x1b, 0xf8, 0x8c, 0xbe, 0x9c, 0xb9, 0x84, 0x7d, 0xe7, 0xe3, 0xd3, 0x13, 0x6e, 0xd3, 0x2a, 0x90, - 0xff, 0x8e, 0x4b, 0x9e, 0x1e, 0x45, 0xf8, 0xd4, 0x3d, 0x3f, 0xc4, 0x17, 0xb2, 0x41, 0x6f, 0xc2, - 0x5c, 0xc8, 0x6c, 0xac, 0x15, 0x16, 0x4c, 0xf1, 0x84, 0xf2, 0x30, 0xcb, 0xba, 0x92, 0x55, 0x7a, - 0xc1, 0xe4, 0x0f, 0xda, 0xa7, 0x0a, 0x6c, 0x0c, 0xc0, 0x08, 0x6a, 0xf7, 0x41, 0xfd, 0xe4, 0x4c, - 0x12, 0xfa, 0x60, 0x2c, 0xa1, 0xa9, 0x00, 0xfa, 0xe1, 0x89, 0x49, 0x11, 0x8a, 0xaf, 0x41, 0xf6, - 0xf0, 0x04, 0xad, 0x82, 0xfa, 0x09, 0xbe, 0x10, 0x39, 0xd1, 0x21, 0x4d, 0xe8, 0xcc, 0x6a, 0xb5, - 0x79, 0xaf, 0x2f, 0x99, 0xfc, 0x41, 0xfb, 0x5b, 0x16, 0x36, 0x4d, 0xdc, 0x74, 0x63, 0x82, 0xa3, - 0xe3, 0xc8, 0xb2, 0x71, 0x18, 0xb8, 0x3e, 0x91, 0x8b, 0x73, 0x20, 0x17, 0xf1, 0xa1, 0xcc, 0xec, - 0x60, 0x6c, 0x66, 0x23, 0x91, 0xf4, 0x21, 0x8b, 0xd9, 0x45, 0x2e, 0xfe, 0x55, 0x81, 0xb5, 0xe1, - 0xb9, 0x7f, 0x00, 0x1b, 0xa4, 0x6b, 0xac, 0x3b, 0x38, 0x6c, 0x05, 0x17, 0x5e, 0xef, 0xcc, 0xdb, - 0x4d, 0x6b, 0x92, 0xfe, 0x7b, 0x4e, 0x77, 0x23, 0x5d, 0xde, 
0x6e, 0x3d, 0xfc, 0x4a, 0x17, 0xc9, - 0xcc, 0x93, 0x14, 0x2b, 0x42, 0x30, 0xe3, 0x5b, 0x1e, 0x16, 0x85, 0x63, 0x63, 0xf4, 0x06, 0xa8, - 0x84, 0xb4, 0xc4, 0xde, 0x1c, 0x7d, 0xec, 0xec, 0xce, 0x77, 0x9e, 0x6f, 0xa9, 0xc7, 0xc7, 0x8f, - 0x4d, 0xea, 0xae, 0xfd, 0x21, 0x0b, 0xc5, 0x34, 0x4a, 0x44, 0xc9, 0xbf, 0x07, 0x8b, 0xbd, 0x04, - 0xae, 0x4f, 0xb0, 0xa8, 0x7f, 0xcf, 0x24, 0x0e, 0xea, 0x24, 0x38, 0xda, 0x1e, 0x38, 0xf3, 0xd7, - 0xe9, 0x34, 0xf2, 0x6e, 0xd7, 0xfb, 0x8f, 0xf6, 0xe2, 0x8f, 0x60, 0x75, 0x10, 0x2d, 0x01, 0xa0, - 0x4c, 0x04, 0x40, 0xaf, 0x42, 0xd6, 0x75, 0xc4, 0x4c, 0x43, 0x67, 0xe3, 0x5c, 0xe7, 0xf9, 0x56, - 0xb6, 0x5a, 0x31, 0xb3, 0xae, 0xd3, 0xe5, 0x5a, 0xed, 0x71, 0xad, 0x7d, 0x00, 0x85, 0x7d, 0x4c, - 0x7a, 0x09, 0x24, 0xee, 0x57, 0x74, 0x17, 0x54, 0xd7, 0x91, 0x54, 0x0d, 0x21, 0x33, 0xf6, 0xab, - 0x95, 0xd8, 0xa4, 0x4e, 0xda, 0x6f, 0x54, 0xd8, 0x4c, 0x01, 0x12, 0xe4, 0x3f, 0x4d, 0x23, 0xff, - 0x83, 0xb1, 0xe4, 0x8f, 0x04, 0x1b, 0xe0, 0x1e, 0xf7, 0x51, 0x5f, 0xfc, 0x3c, 0x0b, 0x2b, 0x03, - 0x0e, 0x82, 0x20, 0x65, 0x32, 0x41, 0xf7, 0x61, 0x96, 0x72, 0xca, 0xbb, 0x71, 0xb9, 0x7c, 0xab, - 0x8f, 0xf5, 0xc7, 0xee, 0x29, 0xde, 0xbb, 0xb0, 0x5b, 0x98, 0xcf, 0xca, 0x3d, 0x91, 0x01, 0x39, - 0xee, 0x81, 0xe3, 0x82, 0xca, 0x96, 0x95, 0x5a, 0xab, 0xae, 0x53, 0xb7, 0x08, 0x33, 0x89, 0x86, - 0xdf, 0x85, 0x65, 0x7c, 0x1e, 0x62, 0x9b, 0xaa, 0x4b, 0x9e, 0xc0, 0xec, 0xe4, 0x04, 0x6e, 0xc8, - 0x10, 0xbe, 0xc8, 0x57, 0x60, 0x89, 0x1f, 0xc3, 0x75, 0x0a, 0x19, 0x17, 0xe6, 0x6e, 0xab, 0x77, - 0x16, 0xcc, 0x45, 0x6e, 0xfb, 0x88, 0x9a, 0x34, 0x03, 0x5e, 0x32, 0xb1, 0x17, 0x9c, 0xe1, 0xe1, - 0xfd, 0x9f, 0x87, 0x59, 0x1e, 0xa6, 0xb0, 0x30, 0xfe, 0xa0, 0xed, 0x43, 0x61, 0x38, 0x40, 0x94, - 0x74, 0x9a, 0x16, 0xd5, 0x6c, 0x58, 0xe7, 0x17, 0xc0, 0x5e, 0xe0, 0x9f, 0xba, 0x4d, 0x39, 0xeb, - 0x84, 0x73, 0x73, 0x41, 0x9c, 0x9b, 0xf4, 0xd2, 0xe0, 0x17, 0x4d, 0x18, 0x38, 0xf5, 0x44, 0x0b, - 0xf3, 0xeb, 0xe8, 0x28, 0x70, 0xe8, 0xfa, 0xb4, 0x3d, 0xc8, 0xf7, 0x4f, 0x72, 0x9d, 0x4c, 0xd7, - 
0x61, 0x6d, 0x1f, 0x93, 0x9a, 0x1d, 0xb9, 0x21, 0x91, 0xba, 0x48, 0xfb, 0x93, 0x02, 0x28, 0x69, - 0x15, 0xc0, 0x27, 0x30, 0x1f, 0x73, 0x93, 0xe8, 0xe8, 0x77, 0x26, 0x75, 0xf4, 0x00, 0x82, 0x2e, - 0x9e, 0xdf, 0xf7, 0x49, 0x74, 0x61, 0x4a, 0xb0, 0x62, 0x0d, 0x96, 0x92, 0x2f, 0x52, 0x68, 0xba, - 0x97, 0xa4, 0x69, 0xb1, 0xfc, 0x12, 0x3b, 0x9e, 0x85, 0x26, 0xd7, 0xf7, 0xa2, 0xc0, 0xe7, 0xf1, - 0x82, 0xbf, 0x87, 0xd9, 0xb7, 0x14, 0xed, 0x10, 0x0a, 0x8f, 0x1c, 0xe7, 0xe3, 0x88, 0x53, 0x24, - 0xde, 0x8b, 0x3a, 0x18, 0x54, 0x95, 0x53, 0x83, 0x60, 0x68, 0x24, 0x9e, 0x70, 0xd3, 0x6e, 0xc1, - 0x66, 0x0a, 0x98, 0x50, 0x70, 0xdf, 0x86, 0xf5, 0x0a, 0xd3, 0x59, 0xfd, 0x93, 0x3c, 0x84, 0x05, - 0x1e, 0x3d, 0x46, 0xc9, 0x2d, 0x75, 0x9e, 0x6f, 0xe5, 0x78, 0x58, 0xb5, 0x62, 0xe6, 0xb8, 0x7f, - 0xd5, 0xd1, 0x6e, 0x42, 0xbe, 0x1f, 0x52, 0x4c, 0xf5, 0x47, 0x05, 0xd6, 0x6a, 0x83, 0xe5, 0x42, - 0x4f, 0x06, 0xeb, 0xf2, 0xf6, 0xd8, 0xba, 0x0c, 0x01, 0xfc, 0x2f, 0xcb, 0x92, 0x07, 0x54, 0x1b, - 0xea, 0x0b, 0xed, 0xcf, 0x0a, 0x2c, 0xbf, 0x7f, 0x8e, 0xed, 0x36, 0xbd, 0xe7, 0x68, 0x87, 0xc6, - 0xe8, 0x2e, 0xac, 0x61, 0x69, 0xa9, 0xd3, 0x2f, 0xdc, 0xba, 0xcf, 0x1b, 0x5a, 0x35, 0x57, 0xba, - 0x2f, 0x8e, 0x5d, 0x0f, 0x7f, 0x14, 0x23, 0x1d, 0xd6, 0xed, 0xc0, 0x0b, 0xdd, 0x96, 0xd5, 0xe7, - 0x9d, 0x65, 0xde, 0x6b, 0x89, 0x57, 0xc2, 0xff, 0x55, 0x58, 0x69, 0x5c, 0x30, 0x99, 0x1e, 0x05, - 0x36, 0x8e, 0x63, 0x21, 0xe1, 0x54, 0x73, 0x99, 0x99, 0x8f, 0xa4, 0x15, 0x6d, 0xc3, 0x5a, 0x84, - 0xed, 0x20, 0x72, 0x92, 0xae, 0x33, 0xcc, 0x75, 0x55, 0xbc, 0xe8, 0x3a, 0x6b, 0xbf, 0xcd, 0xc2, - 0x57, 0x4c, 0x66, 0xec, 0x2e, 0xc5, 0xc4, 0x71, 0xbb, 0xf5, 0xdf, 0xe8, 0x08, 0xf4, 0x16, 0x2c, - 0x74, 0x3f, 0xf3, 0x05, 0xdd, 0xc5, 0x21, 0xa5, 0x70, 0x2c, 0x3d, 0xcc, 0x9e, 0x33, 0xda, 0x86, - 0x59, 0x1c, 0x45, 0x41, 0x24, 0xf4, 0x45, 0xda, 0x69, 0x40, 0x85, 0x3e, 0xf3, 0x41, 0x27, 0xd0, - 0x23, 0x97, 0x1d, 0xcd, 0xb1, 0x90, 0xfb, 0xdb, 0x63, 0x5b, 0xaa, 0xbf, 0x76, 0x07, 0x19, 0x73, - 0x19, 0xf7, 0x59, 0xa8, 0xfc, 0x8f, 
0x18, 0x17, 0xda, 0x16, 0xbc, 0x3c, 0x82, 0x24, 0xd1, 0x0b, - 0x5b, 0xf0, 0xf2, 0x3e, 0x26, 0x8f, 0x5a, 0xad, 0x01, 0x87, 0xee, 0xe9, 0xf4, 0x6b, 0x15, 0x4a, - 0xa3, 0x3c, 0xc4, 0x49, 0x85, 0x61, 0x9e, 0x4f, 0x27, 0x77, 0xc4, 0xe1, 0xa4, 0x93, 0x6a, 0x0c, - 0x9a, 0x3e, 0x98, 0xa9, 0xc4, 0x2e, 0xfe, 0x2a, 0x0b, 0x2b, 0x03, 0x2f, 0xff, 0x5f, 0xe4, 0x76, - 0x8b, 0x94, 0xff, 0xae, 0xc2, 0x8a, 0xfc, 0x3d, 0xa1, 0xc6, 0x81, 0xd0, 0x39, 0xac, 0x50, 0x9e, - 0x93, 0x9f, 0x68, 0xaf, 0x5f, 0xf5, 0xd3, 0x4e, 0xd6, 0xbe, 0x78, 0x7f, 0x8a, 0x08, 0x5e, 0xbd, - 0xd7, 0x15, 0x84, 0x01, 0xd8, 0x5d, 0xc4, 0xbf, 0xe2, 0xc6, 0xff, 0x44, 0xd2, 0xf7, 0x83, 0x4e, - 0x71, 0xfb, 0x4a, 0xbe, 0xa2, 0xe9, 0x3c, 0x58, 0x92, 0x0b, 0xa4, 0xfa, 0x0d, 0xdd, 0x9b, 0x9c, - 0x6b, 0x42, 0x7d, 0x16, 0xf5, 0xab, 0xba, 0x8b, 0xe9, 0x2e, 0x60, 0x75, 0x1f, 0x93, 0xbe, 0xcf, - 0x35, 0x74, 0x7f, 0x9a, 0x4f, 0x3b, 0x3e, 0x6d, 0x79, 0xfa, 0xaf, 0xc1, 0xf2, 0xef, 0x55, 0xd8, - 0x94, 0xe5, 0x4d, 0x88, 0x4f, 0x51, 0xe8, 0x9f, 0x29, 0x80, 0x86, 0x3f, 0x25, 0xd0, 0x9b, 0xd7, - 0xfb, 0xb8, 0x2b, 0xee, 0x5c, 0xf3, 0x9b, 0x05, 0xfd, 0x54, 0x61, 0xda, 0xa6, 0x5f, 0x55, 0xa3, - 0x07, 0xd3, 0xaa, 0x70, 0x9e, 0xc5, 0x9b, 0xd7, 0x13, 0xef, 0xe8, 0xc7, 0xb0, 0x3a, 0x28, 0x29, - 0xd1, 0x1b, 0x13, 0x56, 0x94, 0x2a, 0x59, 0x8b, 0x0f, 0xa6, 0x8c, 0x12, 0xb5, 0xfa, 0xb9, 0x02, - 0x1b, 0xb2, 0x56, 0x5c, 0x28, 0xca, 0x3a, 0xc5, 0xb0, 0x94, 0xd4, 0x8f, 0x13, 0x76, 0x63, 0x8a, - 0x9e, 0x9d, 0xb0, 0x1b, 0xd3, 0xc4, 0x69, 0xf9, 0x97, 0x73, 0x70, 0xb3, 0xa7, 0x0c, 0x6a, 0x24, - 0x88, 0xb0, 0xcc, 0xc7, 0x13, 0xdb, 0x94, 0x49, 0x03, 0xa4, 0x5f, 0x59, 0x5b, 0xf2, 0x5c, 0x8c, - 0x29, 0xb5, 0x28, 0x6b, 0x8f, 0x21, 0x51, 0x37, 0xa1, 0x3d, 0x46, 0x29, 0xca, 0x09, 0xed, 0x31, - 0x52, 0x3b, 0xd2, 0x1a, 0x24, 0x85, 0xde, 0x84, 0x1a, 0xa4, 0xc8, 0xcc, 0x09, 0x35, 0x48, 0x53, - 0x91, 0x94, 0xe8, 0xda, 0x55, 0x89, 0xae, 0x4d, 0x49, 0xf4, 0xb0, 0xb8, 0x43, 0x9f, 0x2a, 0xb0, - 0x91, 0x7a, 0xe5, 0xa3, 0x6f, 0x4e, 0x68, 0xe9, 0xd1, 0x5a, 0xaa, 0xf8, 
0xf0, 0x3a, 0xa1, 0x22, - 0xa1, 0xcf, 0x14, 0xb8, 0x99, 0x7e, 0xe5, 0xa3, 0x87, 0xd7, 0xd2, 0x09, 0x3c, 0xa5, 0xb7, 0xff, - 0x03, 0x8d, 0xb1, 0xfb, 0xde, 0xb3, 0xcb, 0x52, 0xe6, 0x8b, 0xcb, 0x52, 0xe6, 0xcb, 0xcb, 0x92, - 0xf2, 0x93, 0x4e, 0x49, 0xf9, 0x5d, 0xa7, 0xa4, 0xfc, 0xa5, 0x53, 0x52, 0x9e, 0x75, 0x4a, 0xca, - 0x3f, 0x3a, 0x25, 0xe5, 0x9f, 0x9d, 0x52, 0xe6, 0xcb, 0x4e, 0x49, 0xf9, 0xc5, 0x8b, 0x52, 0xe6, - 0xd9, 0x8b, 0x52, 0xe6, 0x8b, 0x17, 0xa5, 0xcc, 0x77, 0xa1, 0xf7, 0x7f, 0xa7, 0xc6, 0x1c, 0x53, - 0x08, 0xdf, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x26, 0x6f, 0xf1, 0xa9, 0x1a, 0x00, - 0x00, +func (m *ExecutionStats) GetBytesProcessed() int64 { + if m != nil { + return m.BytesProcessed + } + return 0 } -func (this *SchemaRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil +func (m *ExecutionStats) GetRecordsProcessed() int64 { + if m != nil { + return m.RecordsProcessed } + return 0 +} - that1, ok := that.(*SchemaRequest) - if !ok { - that2, ok := that.(SchemaRequest) - if ok { - that1 = &that2 - } else { - return false +type RecordExecutionResultRequest struct { + ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Types that are valid to be assigned to Result: + // *RecordExecutionResultRequest_Error + // *RecordExecutionResultRequest_ExecutionStats + Result isRecordExecutionResultRequest_Result `protobuf_oneof:"result"` +} + +func (m *RecordExecutionResultRequest) Reset() { *m = RecordExecutionResultRequest{} } +func (*RecordExecutionResultRequest) ProtoMessage() {} +func (*RecordExecutionResultRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{33} +} +func (m *RecordExecutionResultRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecordExecutionResultRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RecordExecutionResultRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true } -func (this *SchemaResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil +func (m *RecordExecutionResultRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecordExecutionResultRequest.Merge(m, src) +} +func (m *RecordExecutionResultRequest) XXX_Size() int { + return m.Size() +} +func (m *RecordExecutionResultRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RecordExecutionResultRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RecordExecutionResultRequest proto.InternalMessageInfo + +type isRecordExecutionResultRequest_Result interface { + isRecordExecutionResultRequest_Result() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type RecordExecutionResultRequest_Error struct { + Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` +} +type RecordExecutionResultRequest_ExecutionStats struct { + ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` +} + +func (*RecordExecutionResultRequest_Error) isRecordExecutionResultRequest_Result() {} +func (*RecordExecutionResultRequest_ExecutionStats) isRecordExecutionResultRequest_Result() {} + +func (m *RecordExecutionResultRequest) GetResult() isRecordExecutionResultRequest_Result { + if m != nil { + return m.Result } + return nil +} - that1, ok := that.(*SchemaResponse) - if !ok { - that2, ok := that.(SchemaResponse) - if ok { - that1 = &that2 - } else { - return false - } +func (m *RecordExecutionResultRequest) GetScriptID() *uuidpb.UUID { + if m != nil { + return 
m.ScriptID } - if that1 == nil { - return this == nil - } else if this == nil { - return false + return nil +} + +func (m *RecordExecutionResultRequest) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp } - if !this.Schema.Equal(that1.Schema) { - return false + return nil +} + +func (m *RecordExecutionResultRequest) GetError() *statuspb.Status { + if x, ok := m.GetResult().(*RecordExecutionResultRequest_Error); ok { + return x.Error } - return true + return nil } -func (this *AgentInfoRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil + +func (m *RecordExecutionResultRequest) GetExecutionStats() *ExecutionStats { + if x, ok := m.GetResult().(*RecordExecutionResultRequest_ExecutionStats); ok { + return x.ExecutionStats } + return nil +} - that1, ok := that.(*AgentInfoRequest) - if !ok { - that2, ok := that.(AgentInfoRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*RecordExecutionResultRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*RecordExecutionResultRequest_Error)(nil), + (*RecordExecutionResultRequest_ExecutionStats)(nil), } - return true } -func (this *AgentInfoResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*AgentInfoResponse) - if !ok { - that2, ok := that.(AgentInfoResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Info) != len(that1.Info) { - return false - } - for i := range this.Info { - if !this.Info[i].Equal(that1.Info[i]) { - return false - } - } - return true +type RecordExecutionResultResponse struct { } -func (this *AgentMetadata) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*AgentMetadata) - if !ok { - that2, ok := that.(AgentMetadata) - if ok { - that1 = &that2 - } else { - return false +func (m *RecordExecutionResultResponse) Reset() { *m = RecordExecutionResultResponse{} } +func (*RecordExecutionResultResponse) ProtoMessage() {} +func (*RecordExecutionResultResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{34} +} +func (m *RecordExecutionResultResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecordExecutionResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RecordExecutionResultResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Agent.Equal(that1.Agent) { - return false - } - if !this.Status.Equal(that1.Status) { - return false - } - if !this.CarnotInfo.Equal(that1.CarnotInfo) { - return false - } - return 
true } -func (this *AgentUpdatesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } +func (m *RecordExecutionResultResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecordExecutionResultResponse.Merge(m, src) +} +func (m *RecordExecutionResultResponse) XXX_Size() int { + return m.Size() +} +func (m *RecordExecutionResultResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RecordExecutionResultResponse.DiscardUnknown(m) +} - that1, ok := that.(*AgentUpdatesRequest) - if !ok { - that2, ok := that.(AgentUpdatesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.MaxUpdateInterval.Equal(that1.MaxUpdateInterval) { - return false - } - if this.MaxUpdatesPerResponse != that1.MaxUpdatesPerResponse { - return false - } - return true +var xxx_messageInfo_RecordExecutionResultResponse proto.InternalMessageInfo + +type GetAllExecutionResultsRequest struct { } -func (this *AgentUpdate) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*AgentUpdate) - if !ok { - that2, ok := that.(AgentUpdate) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.AgentID.Equal(that1.AgentID) { - return false - } - if that1.Update == nil { - if this.Update != nil { - return false +func (m *GetAllExecutionResultsRequest) Reset() { *m = GetAllExecutionResultsRequest{} } +func (*GetAllExecutionResultsRequest) ProtoMessage() {} +func (*GetAllExecutionResultsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{35} +} +func (m *GetAllExecutionResultsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetAllExecutionResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_GetAllExecutionResultsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - } else if this.Update == nil { - return false - } else if !this.Update.Equal(that1.Update) { - return false + return b[:n], nil } - return true } -func (this *AgentUpdate_Deleted) Equal(that interface{}) bool { - if that == nil { - return this == nil - } +func (m *GetAllExecutionResultsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllExecutionResultsRequest.Merge(m, src) +} +func (m *GetAllExecutionResultsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetAllExecutionResultsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllExecutionResultsRequest.DiscardUnknown(m) +} - that1, ok := that.(*AgentUpdate_Deleted) - if !ok { - that2, ok := that.(AgentUpdate_Deleted) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Deleted != that1.Deleted { - return false - } - return true +var xxx_messageInfo_GetAllExecutionResultsRequest proto.InternalMessageInfo + +type GetAllExecutionResultsResponse struct { + Results []*GetAllExecutionResultsResponse_ExecutionResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } -func (this *AgentUpdate_Agent) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*AgentUpdate_Agent) - if !ok { - that2, ok := that.(AgentUpdate_Agent) - if ok { - that1 = &that2 - } else { - return false +func (m *GetAllExecutionResultsResponse) Reset() { *m = GetAllExecutionResultsResponse{} } +func (*GetAllExecutionResultsResponse) ProtoMessage() {} +func (*GetAllExecutionResultsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{36} +} +func (m *GetAllExecutionResultsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*GetAllExecutionResultsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetAllExecutionResultsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Agent.Equal(that1.Agent) { - return false - } - return true } -func (this *AgentUpdate_DataInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } +func (m *GetAllExecutionResultsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllExecutionResultsResponse.Merge(m, src) +} +func (m *GetAllExecutionResultsResponse) XXX_Size() int { + return m.Size() +} +func (m *GetAllExecutionResultsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllExecutionResultsResponse.DiscardUnknown(m) +} - that1, ok := that.(*AgentUpdate_DataInfo) - if !ok { - that2, ok := that.(AgentUpdate_DataInfo) +var xxx_messageInfo_GetAllExecutionResultsResponse proto.InternalMessageInfo + +func (m *GetAllExecutionResultsResponse) GetResults() []*GetAllExecutionResultsResponse_ExecutionResult { + if m != nil { + return m.Results + } + return nil +} + +type GetAllExecutionResultsResponse_ExecutionResult struct { + ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Types that are valid to be assigned to Result: + // *GetAllExecutionResultsResponse_ExecutionResult_Error + // *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats + Result isGetAllExecutionResultsResponse_ExecutionResult_Result `protobuf_oneof:"result"` +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) Reset() { + *m = GetAllExecutionResultsResponse_ExecutionResult{} +} +func 
(*GetAllExecutionResultsResponse_ExecutionResult) ProtoMessage() {} +func (*GetAllExecutionResultsResponse_ExecutionResult) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{36, 0} +} +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Merge(m, src) +} +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Size() int { + return m.Size() +} +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult proto.InternalMessageInfo + +type isGetAllExecutionResultsResponse_ExecutionResult_Result interface { + isGetAllExecutionResultsResponse_ExecutionResult_Result() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type GetAllExecutionResultsResponse_ExecutionResult_Error struct { + Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` +} +type GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats struct { + ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` +} + +func (*GetAllExecutionResultsResponse_ExecutionResult_Error) isGetAllExecutionResultsResponse_ExecutionResult_Result() { +} +func 
(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) isGetAllExecutionResultsResponse_ExecutionResult_Result() { +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetResult() isGetAllExecutionResultsResponse_ExecutionResult_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetScriptID() *uuidpb.UUID { + if m != nil { + return m.ScriptID + } + return nil +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetError() *statuspb.Status { + if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_Error); ok { + return x.Error + } + return nil +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetExecutionStats() *ExecutionStats { + if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats); ok { + return x.ExecutionStats + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*GetAllExecutionResultsResponse_ExecutionResult) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*GetAllExecutionResultsResponse_ExecutionResult_Error)(nil), + (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats)(nil), + } +} + +func init() { + proto.RegisterType((*SchemaRequest)(nil), "px.vizier.services.metadata.SchemaRequest") + proto.RegisterType((*SchemaResponse)(nil), "px.vizier.services.metadata.SchemaResponse") + proto.RegisterType((*AgentInfoRequest)(nil), "px.vizier.services.metadata.AgentInfoRequest") + proto.RegisterType((*AgentInfoResponse)(nil), "px.vizier.services.metadata.AgentInfoResponse") + proto.RegisterType((*AgentMetadata)(nil), "px.vizier.services.metadata.AgentMetadata") + proto.RegisterType((*AgentUpdatesRequest)(nil), "px.vizier.services.metadata.AgentUpdatesRequest") + proto.RegisterType((*AgentUpdate)(nil), "px.vizier.services.metadata.AgentUpdate") + proto.RegisterType((*AgentUpdatesResponse)(nil), "px.vizier.services.metadata.AgentUpdatesResponse") + proto.RegisterType((*WithPrefixKeyRequest)(nil), "px.vizier.services.metadata.WithPrefixKeyRequest") + proto.RegisterType((*WithPrefixKeyResponse)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse") + proto.RegisterType((*WithPrefixKeyResponse_KV)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse.KV") + proto.RegisterType((*RegisterFileSourceRequest)(nil), "px.vizier.services.metadata.RegisterFileSourceRequest") + proto.RegisterType((*RegisterFileSourceResponse)(nil), "px.vizier.services.metadata.RegisterFileSourceResponse") + proto.RegisterType((*RegisterFileSourceResponse_FileSourceStatus)(nil), "px.vizier.services.metadata.RegisterFileSourceResponse.FileSourceStatus") + proto.RegisterType((*GetFileSourceInfoRequest)(nil), "px.vizier.services.metadata.GetFileSourceInfoRequest") + proto.RegisterType((*GetFileSourceInfoResponse)(nil), "px.vizier.services.metadata.GetFileSourceInfoResponse") + 
proto.RegisterType((*GetFileSourceInfoResponse_FileSourceState)(nil), "px.vizier.services.metadata.GetFileSourceInfoResponse.FileSourceState") + proto.RegisterType((*RemoveFileSourceRequest)(nil), "px.vizier.services.metadata.RemoveFileSourceRequest") + proto.RegisterType((*RemoveFileSourceResponse)(nil), "px.vizier.services.metadata.RemoveFileSourceResponse") + proto.RegisterType((*RegisterTracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest") + proto.RegisterType((*RegisterTracepointRequest_TracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest.TracepointRequest") + proto.RegisterType((*RegisterTracepointResponse)(nil), "px.vizier.services.metadata.RegisterTracepointResponse") + proto.RegisterType((*RegisterTracepointResponse_TracepointStatus)(nil), "px.vizier.services.metadata.RegisterTracepointResponse.TracepointStatus") + proto.RegisterType((*GetTracepointInfoRequest)(nil), "px.vizier.services.metadata.GetTracepointInfoRequest") + proto.RegisterType((*GetTracepointInfoResponse)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse") + proto.RegisterType((*GetTracepointInfoResponse_TracepointState)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse.TracepointState") + proto.RegisterType((*RemoveTracepointRequest)(nil), "px.vizier.services.metadata.RemoveTracepointRequest") + proto.RegisterType((*RemoveTracepointResponse)(nil), "px.vizier.services.metadata.RemoveTracepointResponse") + proto.RegisterType((*UpdateConfigRequest)(nil), "px.vizier.services.metadata.UpdateConfigRequest") + proto.RegisterType((*UpdateConfigResponse)(nil), "px.vizier.services.metadata.UpdateConfigResponse") + proto.RegisterType((*GetScriptsRequest)(nil), "px.vizier.services.metadata.GetScriptsRequest") + proto.RegisterType((*GetScriptsResponse)(nil), "px.vizier.services.metadata.GetScriptsResponse") + proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), 
"px.vizier.services.metadata.GetScriptsResponse.ScriptsEntry") + proto.RegisterType((*AddOrUpdateScriptRequest)(nil), "px.vizier.services.metadata.AddOrUpdateScriptRequest") + proto.RegisterType((*AddOrUpdateScriptResponse)(nil), "px.vizier.services.metadata.AddOrUpdateScriptResponse") + proto.RegisterType((*DeleteScriptRequest)(nil), "px.vizier.services.metadata.DeleteScriptRequest") + proto.RegisterType((*DeleteScriptResponse)(nil), "px.vizier.services.metadata.DeleteScriptResponse") + proto.RegisterType((*SetScriptsRequest)(nil), "px.vizier.services.metadata.SetScriptsRequest") + proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), "px.vizier.services.metadata.SetScriptsRequest.ScriptsEntry") + proto.RegisterType((*SetScriptsResponse)(nil), "px.vizier.services.metadata.SetScriptsResponse") + proto.RegisterType((*ExecutionStats)(nil), "px.vizier.services.metadata.ExecutionStats") + proto.RegisterType((*RecordExecutionResultRequest)(nil), "px.vizier.services.metadata.RecordExecutionResultRequest") + proto.RegisterType((*RecordExecutionResultResponse)(nil), "px.vizier.services.metadata.RecordExecutionResultResponse") + proto.RegisterType((*GetAllExecutionResultsRequest)(nil), "px.vizier.services.metadata.GetAllExecutionResultsRequest") + proto.RegisterType((*GetAllExecutionResultsResponse)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse") + proto.RegisterType((*GetAllExecutionResultsResponse_ExecutionResult)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse.ExecutionResult") +} + +func init() { + proto.RegisterFile("src/vizier/services/metadata/metadatapb/service.proto", fileDescriptor_bfe4468195647430) +} + +var fileDescriptor_bfe4468195647430 = []byte{ + // 2204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xe7, 0x92, 0xfa, 0xa0, 0x9e, 0x64, 0x7d, 0x8c, 0x24, 0x47, 0x62, 0x1a, 0xca, 0x59, 0xb4, + 0x8d, 0x61, 0xc5, 0xbb, 
0xb1, 0x1a, 0x59, 0xa9, 0x93, 0x06, 0xb1, 0xc4, 0x58, 0x22, 0xe4, 0x24, + 0xea, 0x52, 0x56, 0x81, 0x5e, 0x88, 0xe5, 0xee, 0x90, 0xde, 0x9a, 0xfb, 0xd1, 0xdd, 0xa5, 0x2a, + 0x15, 0x05, 0x5a, 0x14, 0xe8, 0xad, 0x08, 0x9a, 0x43, 0x0b, 0xe4, 0xd6, 0x8f, 0x4b, 0x7b, 0x6e, + 0xef, 0x45, 0x7b, 0xea, 0xd1, 0xa7, 0x22, 0x28, 0x0a, 0xa3, 0xa6, 0x2f, 0x3d, 0x15, 0xf9, 0x13, + 0x8a, 0xf9, 0x5a, 0xee, 0x92, 0x4b, 0x2e, 0xa9, 0x16, 0x39, 0xe5, 0xa4, 0xe1, 0x9b, 0xf7, 0xde, + 0xbc, 0xf9, 0xfd, 0xde, 0xbc, 0x7d, 0x33, 0x10, 0xec, 0x06, 0xbe, 0xa1, 0x9e, 0x5b, 0x3f, 0xb4, + 0xb0, 0xaf, 0x06, 0xd8, 0x3f, 0xb7, 0x0c, 0x1c, 0xa8, 0x36, 0x0e, 0x75, 0x53, 0x0f, 0xf5, 0x68, + 0xe0, 0x35, 0xc4, 0xa4, 0xe2, 0xf9, 0x6e, 0xe8, 0xa2, 0x97, 0xbd, 0x0b, 0x85, 0x59, 0x29, 0xc2, + 0x4a, 0x11, 0xca, 0xa5, 0xb5, 0x96, 0xdb, 0x72, 0xa9, 0x9e, 0x4a, 0x46, 0xcc, 0xa4, 0x54, 0x6e, + 0xb9, 0x6e, 0xab, 0x8d, 0x55, 0xfa, 0xab, 0xd1, 0x69, 0xaa, 0x66, 0xc7, 0xd7, 0x43, 0xcb, 0x75, + 0xf8, 0xfc, 0x56, 0xff, 0x7c, 0x68, 0xd9, 0x38, 0x08, 0x75, 0xdb, 0x13, 0x0a, 0x24, 0x54, 0xdd, + 0xb3, 0x98, 0x86, 0xda, 0xe9, 0x58, 0xa6, 0xd7, 0xa0, 0x7f, 0xb8, 0xc2, 0x1e, 0x51, 0x30, 0x74, + 0xdf, 0x71, 0x43, 0xd5, 0x6b, 0xeb, 0x8e, 0x83, 0x7d, 0xd5, 0xb4, 0x82, 0xd0, 0xb7, 0x1a, 0x9d, + 0x10, 0x13, 0xe5, 0xd8, 0xaf, 0x3a, 0xd1, 0xe0, 0x86, 0xdf, 0x4a, 0x33, 0xbc, 0x74, 0x74, 0xdb, + 0x32, 0xea, 0xa1, 0xaf, 0x1b, 0x96, 0xd3, 0x52, 0x2d, 0x5f, 0x6d, 0xbb, 0x2d, 0xcb, 0xd0, 0xdb, + 0x5e, 0x43, 0x8c, 0xb8, 0xb9, 0x9a, 0x62, 0xde, 0xb4, 0xda, 0xb8, 0x1e, 0xb8, 0x1d, 0xdf, 0xc0, + 0x31, 0x53, 0x6e, 0xf0, 0x35, 0x6a, 0xe0, 0xda, 0xb6, 0xeb, 0xa8, 0x0d, 0x3d, 0xc0, 0x6a, 0x10, + 0xea, 0x61, 0x27, 0x20, 0x28, 0xd3, 0x41, 0x5c, 0x2d, 0xd4, 0x1b, 0xc4, 0x53, 0xe8, 0xfa, 0x58, + 0x0d, 0x8c, 0xc7, 0xd8, 0xa6, 0x64, 0xd0, 0x01, 0x57, 0xbb, 0x1d, 0xa3, 0xd0, 0xc6, 0x41, 0xa0, + 0xb7, 0x28, 0x85, 0x6c, 0xe0, 0x35, 0xa2, 0x21, 0x57, 0x57, 0xd2, 0x18, 0x0f, 0x1e, 0xeb, 0x3e, + 0x36, 0x55, 0xbd, 0x85, 0x9d, 0xd0, 0x6b, 0xb0, 0xbf, 0x5c, 
0xff, 0x06, 0xd1, 0xe7, 0xf3, 0xc6, + 0xb9, 0x1d, 0xb4, 0x88, 0x4f, 0x36, 0x60, 0x1a, 0xf2, 0x12, 0x5c, 0xab, 0xd1, 0x80, 0x34, 0xfc, + 0xfd, 0x0e, 0x0e, 0x42, 0xb9, 0x0a, 0x8b, 0x42, 0x10, 0x78, 0xae, 0x13, 0x60, 0xb4, 0x07, 0x33, + 0x2c, 0xe6, 0x8d, 0xfc, 0x0d, 0xe9, 0xe6, 0xfc, 0xce, 0x96, 0xe2, 0x5d, 0x28, 0xb1, 0xad, 0x29, + 0x62, 0x6b, 0x0a, 0x37, 0xe4, 0xea, 0x32, 0x82, 0xe5, 0xfb, 0x24, 0x98, 0xaa, 0xd3, 0x74, 0x85, + 0xfb, 0x1a, 0xac, 0xc4, 0x64, 0x7c, 0x85, 0x77, 0x61, 0xca, 0x72, 0x9a, 0xee, 0x86, 0x74, 0xa3, + 0x70, 0x73, 0x7e, 0xe7, 0x96, 0x32, 0x22, 0x41, 0x15, 0x6a, 0xfd, 0x01, 0xff, 0xa5, 0x51, 0x3b, + 0xf9, 0xb9, 0x04, 0xd7, 0x12, 0x72, 0xf4, 0x0e, 0x4c, 0x53, 0x1c, 0x36, 0x24, 0x1a, 0xf2, 0xd7, + 0xd3, 0x5c, 0x32, 0x5c, 0x14, 0x86, 0x17, 0x35, 0xd7, 0x98, 0x11, 0xaa, 0xc0, 0x0c, 0x23, 0x93, + 0xef, 0xf8, 0xf5, 0xf1, 0xcc, 0x6b, 0xd4, 0x46, 0xe3, 0xb6, 0xe8, 0x21, 0xcc, 0xb3, 0xc4, 0xaa, + 0xd3, 0xcd, 0x15, 0xa8, 0xab, 0x6d, 0xe2, 0x8a, 0x89, 0x15, 0x9e, 0x6f, 0x4a, 0x22, 0xcf, 0x95, + 0x03, 0x3a, 0x49, 0xf1, 0x01, 0x23, 0x1a, 0xcb, 0x9f, 0x4a, 0xb0, 0x4a, 0x57, 0x79, 0xe4, 0x99, + 0x7a, 0x88, 0x03, 0x0e, 0x28, 0xaa, 0xc2, 0xaa, 0xad, 0x5f, 0xd4, 0x3b, 0x54, 0x5a, 0xb7, 0x9c, + 0x10, 0xfb, 0xe7, 0x7a, 0x9b, 0xef, 0x7b, 0x53, 0x61, 0x07, 0x53, 0x11, 0x07, 0x53, 0xa9, 0xf0, + 0x83, 0xab, 0xad, 0xd8, 0xfa, 0x05, 0x73, 0x55, 0xe5, 0x36, 0x68, 0x0f, 0x36, 0x7a, 0xae, 0x82, + 0xba, 0x87, 0xfd, 0xba, 0xcf, 0x29, 0xa2, 0x40, 0x4c, 0x6b, 0xeb, 0x91, 0x51, 0x70, 0x82, 0x7d, + 0xc1, 0x9f, 0xfc, 0x1f, 0x09, 0xe6, 0x63, 0xb1, 0xa1, 0x3d, 0x28, 0x52, 0x58, 0xea, 0x96, 0xc9, + 0x03, 0x59, 0x22, 0xdb, 0x66, 0xa7, 0x5e, 0x79, 0xf4, 0xa8, 0x5a, 0xd9, 0x9f, 0xef, 0x3e, 0xdb, + 0x9a, 0x65, 0x99, 0x50, 0xd1, 0x66, 0xa9, 0x76, 0xd5, 0x44, 0x25, 0x98, 0x35, 0x71, 0x1b, 0x87, + 0xd8, 0xa4, 0x0b, 0x16, 0x8f, 0x72, 0x9a, 0x10, 0xa0, 0x77, 0x05, 0xa5, 0x85, 0x49, 0x28, 0x3d, + 0xca, 0x09, 0x52, 0xdf, 0x83, 0x39, 0x92, 0x1a, 0x8c, 0x8c, 0x29, 0xea, 0xe3, 0xd5, 0x98, 0x8f, + 
0xe8, 0xa4, 0x51, 0xb3, 0x8a, 0x1e, 0xea, 0x04, 0xf6, 0xa3, 0x9c, 0x56, 0x34, 0xf9, 0x78, 0xbf, + 0x08, 0x33, 0x0c, 0x1b, 0xf9, 0x93, 0x3c, 0xac, 0x25, 0xc9, 0xe0, 0x99, 0xfc, 0x01, 0x5c, 0x63, + 0x3b, 0xe7, 0x20, 0xf2, 0x94, 0xbe, 0x99, 0x9d, 0xd2, 0xcc, 0x93, 0xb6, 0xa0, 0xc7, 0xdc, 0xa2, + 0x13, 0xe1, 0x8e, 0x9d, 0x28, 0x92, 0x8f, 0x85, 0xb1, 0x92, 0x88, 0x9d, 0x44, 0x9a, 0x44, 0xcc, + 0x23, 0x13, 0x04, 0x68, 0x07, 0xd6, 0x13, 0x1e, 0x79, 0xa0, 0x26, 0x45, 0xb5, 0xa8, 0xad, 0xc6, + 0x95, 0x59, 0x14, 0x26, 0xfa, 0x2a, 0x2c, 0x62, 0xc7, 0xac, 0xbb, 0xcd, 0xfa, 0x39, 0xf6, 0x03, + 0xcb, 0x75, 0x28, 0x7c, 0x45, 0x6d, 0x01, 0x3b, 0xe6, 0x47, 0xcd, 0x33, 0x26, 0x93, 0x2b, 0xb0, + 0xf6, 0x1d, 0x2b, 0x7c, 0x7c, 0xe2, 0xe3, 0xa6, 0x75, 0x71, 0x8c, 0x2f, 0x45, 0x82, 0x5e, 0x87, + 0x19, 0x8f, 0xca, 0x68, 0x2a, 0xcc, 0x69, 0xfc, 0x17, 0x5a, 0x83, 0x69, 0x9a, 0x95, 0x94, 0xe9, + 0x39, 0x8d, 0xfd, 0x90, 0x3f, 0x96, 0x60, 0xbd, 0xcf, 0x0d, 0x87, 0xf6, 0x10, 0x0a, 0x4f, 0xce, + 0x05, 0xa0, 0xbb, 0x23, 0x01, 0x4d, 0x75, 0xa0, 0x1c, 0x9f, 0x69, 0xc4, 0x43, 0xe9, 0x75, 0xc8, + 0x1f, 0x9f, 0xa1, 0x65, 0x28, 0x3c, 0xc1, 0x97, 0x3c, 0x26, 0x32, 0x24, 0x01, 0x9d, 0xeb, 0xed, + 0x0e, 0xcb, 0xf5, 0x05, 0x8d, 0xfd, 0x90, 0x5d, 0xd8, 0xd4, 0x70, 0xcb, 0x0a, 0x42, 0xec, 0x3f, + 0xb0, 0xda, 0xb8, 0x46, 0x3f, 0x0b, 0x62, 0x6f, 0x1a, 0x14, 0x7d, 0x36, 0x14, 0x81, 0xdd, 0x4d, + 0xa1, 0x26, 0xf6, 0x3d, 0x51, 0x2c, 0x5f, 0xe9, 0xb9, 0xa9, 0x60, 0xaf, 0xed, 0x5e, 0xda, 0xa4, + 0xf2, 0x44, 0x7e, 0xe4, 0x3f, 0xe5, 0xa1, 0x94, 0xb6, 0x22, 0x87, 0xe1, 0x09, 0x2c, 0xc4, 0xfc, + 0x89, 0x65, 0x8f, 0x46, 0xe2, 0x31, 0xdc, 0x5d, 0x2c, 0x18, 0x5e, 0xbd, 0xe6, 0x9b, 0x91, 0x24, + 0x40, 0xdb, 0x7d, 0x85, 0x70, 0x95, 0x2c, 0x23, 0x3e, 0x78, 0x4a, 0xb2, 0xde, 0x95, 0x7e, 0x04, + 0xcb, 0xfd, 0xde, 0x62, 0x0e, 0xa4, 0x4c, 0x07, 0xe8, 0x35, 0xc8, 0x5b, 0x26, 0x5f, 0x69, 0xa0, + 0x60, 0xcc, 0x74, 0x9f, 0x6d, 0xe5, 0xab, 0x15, 0x2d, 0x6f, 0x99, 0x08, 0xc1, 0x94, 0xa3, 0xdb, + 0x98, 0xe6, 0xec, 0x9c, 0x46, 0xc7, 
0xf2, 0x03, 0xd8, 0x38, 0xc4, 0x61, 0x2f, 0x80, 0xd8, 0x47, + 0x07, 0xdd, 0x82, 0x82, 0x65, 0x0a, 0xa8, 0x06, 0x3c, 0xcf, 0x76, 0x9f, 0x6d, 0x15, 0xaa, 0x95, + 0x40, 0x23, 0x4a, 0xf2, 0x6f, 0x0b, 0xb0, 0x99, 0xe2, 0x88, 0xa3, 0x6f, 0xa5, 0xa2, 0xff, 0x60, + 0x24, 0xfa, 0x43, 0xbd, 0xf5, 0x81, 0x8f, 0x13, 0xd8, 0x97, 0x3e, 0xcd, 0xc3, 0x52, 0x9f, 0x02, + 0x47, 0x48, 0xca, 0x46, 0xe8, 0x0e, 0x4c, 0x13, 0x50, 0x59, 0x2e, 0x2f, 0xee, 0xbc, 0x9c, 0x80, + 0xfd, 0xa1, 0xd5, 0xc4, 0x07, 0x97, 0x46, 0x9b, 0xaf, 0xca, 0x34, 0x91, 0x0a, 0x45, 0xa6, 0x81, + 0x83, 0x8d, 0x02, 0xdd, 0x56, 0x2a, 0x59, 0x91, 0x52, 0xc4, 0xc2, 0x54, 0x8f, 0x05, 0xb4, 0x0f, + 0x8b, 0xf8, 0xc2, 0xc3, 0x06, 0x69, 0xd2, 0x58, 0x00, 0xd3, 0xd9, 0x01, 0x5c, 0x13, 0x26, 0x6c, + 0x93, 0xaf, 0xc2, 0x02, 0x2b, 0x4e, 0x75, 0xe2, 0x32, 0xd8, 0x98, 0xb9, 0x51, 0xb8, 0x39, 0xa7, + 0xcd, 0x33, 0xd9, 0x87, 0x44, 0x24, 0xab, 0xf0, 0x92, 0x86, 0x6d, 0xf7, 0x1c, 0x0f, 0x1e, 0xc9, + 0x35, 0x98, 0x66, 0x66, 0x12, 0x35, 0x63, 0x3f, 0xe4, 0x43, 0xd8, 0x18, 0x34, 0xe0, 0x9c, 0x4e, + 0x92, 0xa3, 0xf2, 0x3f, 0xf2, 0xbd, 0x7a, 0x70, 0xea, 0xeb, 0x06, 0xf6, 0x5c, 0xcb, 0x09, 0xc5, + 0xe2, 0xe6, 0x40, 0x3d, 0x18, 0xef, 0x60, 0x0e, 0x78, 0x52, 0x06, 0x24, 0xbd, 0x0a, 0x51, 0xfa, + 0xbb, 0x04, 0x2b, 0x83, 0x6b, 0xff, 0x00, 0xd6, 0xc3, 0x48, 0x58, 0x37, 0xa3, 0xd2, 0xc2, 0x77, + 0xb5, 0x9f, 0xf6, 0xcd, 0x48, 0xf6, 0xc9, 0xa4, 0x38, 0x89, 0x66, 0xb7, 0xe7, 0x3f, 0x56, 0xa4, + 0xd6, 0xc2, 0x14, 0x69, 0x94, 0x07, 0xf9, 0x58, 0x1e, 0xbc, 0x09, 0x85, 0x30, 0x6c, 0xf3, 0x4f, + 0xf5, 0xf0, 0x2e, 0x84, 0x9d, 0xbd, 0xd3, 0xd3, 0x87, 0x1a, 0x51, 0x97, 0xff, 0x18, 0x2b, 0x7d, + 0xf1, 0x0d, 0x72, 0xa2, 0xbe, 0x07, 0xf3, 0xbd, 0x00, 0xae, 0x0e, 0x30, 0x3f, 0x7c, 0x3d, 0x91, + 0xa8, 0x7c, 0x31, 0xe7, 0x13, 0x57, 0xbe, 0x7e, 0x6f, 0x5f, 0x78, 0xe5, 0xeb, 0x05, 0x70, 0xd5, + 0xca, 0xf7, 0x1b, 0x56, 0xf9, 0xfa, 0x1d, 0x71, 0xf0, 0x1f, 0xa7, 0x81, 0x9f, 0x59, 0xf8, 0xd2, + 0x9d, 0xf5, 0x61, 0x8f, 0x13, 0xd0, 0xd3, 0xc2, 0xd7, 0xa7, 0xf0, 0x65, 
0xe1, 0xeb, 0x2f, 0x7c, + 0x83, 0xe7, 0x3f, 0xa3, 0xf0, 0xa5, 0x9c, 0xa7, 0x89, 0x0a, 0x9f, 0x01, 0xab, 0xac, 0x1f, 0x3c, + 0x70, 0x9d, 0xa6, 0xd5, 0x12, 0xab, 0x66, 0xb4, 0x51, 0x73, 0xbc, 0x8d, 0x22, 0x3d, 0x24, 0xeb, + 0x3b, 0x3d, 0xd7, 0xac, 0xc7, 0x52, 0x98, 0x75, 0xa7, 0x27, 0xae, 0x49, 0xf6, 0x27, 0x1f, 0xc0, + 0x5a, 0x72, 0x91, 0xab, 0x44, 0xba, 0x0a, 0x2b, 0x87, 0x38, 0xac, 0x19, 0xbe, 0xe5, 0x85, 0xe2, + 0x9a, 0x24, 0xff, 0x45, 0x02, 0x14, 0x97, 0x72, 0xc7, 0x67, 0x30, 0x1b, 0x30, 0x11, 0xcf, 0xe8, + 0x77, 0xb2, 0x32, 0xba, 0xcf, 0x83, 0xc2, 0x7f, 0xbf, 0xef, 0x84, 0xfe, 0xa5, 0x26, 0x9c, 0x95, + 0x6a, 0xb0, 0x10, 0x9f, 0x48, 0x81, 0xe9, 0x76, 0x1c, 0xa6, 0xf9, 0x9d, 0x97, 0x68, 0x79, 0xe6, + 0x57, 0x74, 0xe5, 0xc0, 0x77, 0x1d, 0x66, 0xcf, 0xf1, 0xbb, 0x97, 0x7f, 0x4b, 0x92, 0x8f, 0x61, + 0xe3, 0xbe, 0x69, 0x7e, 0xe4, 0x33, 0x88, 0xf8, 0x3c, 0xe7, 0x41, 0x25, 0x97, 0x74, 0x22, 0xe0, + 0x08, 0x0d, 0xf5, 0xc7, 0xd5, 0xe4, 0x97, 0x61, 0x33, 0xc5, 0x19, 0xbf, 0xd0, 0x7d, 0x1b, 0x56, + 0x2b, 0xf4, 0xda, 0x95, 0x5c, 0xe4, 0x1e, 0xcc, 0x31, 0xeb, 0x11, 0x17, 0xbb, 0x85, 0xee, 0xb3, + 0xad, 0x22, 0x33, 0xab, 0x56, 0xb4, 0x22, 0xd3, 0xaf, 0x9a, 0xf2, 0x75, 0x58, 0x4b, 0xba, 0xe4, + 0x4b, 0xfd, 0x59, 0x82, 0x95, 0x5a, 0x3f, 0x5d, 0xe8, 0x51, 0x3f, 0x2f, 0x6f, 0x8f, 0xe4, 0x65, + 0xc0, 0xc1, 0x17, 0x49, 0xcb, 0x1a, 0xa0, 0xda, 0x40, 0x5e, 0xc8, 0x7f, 0x95, 0x60, 0xf1, 0xfd, + 0x0b, 0x6c, 0x74, 0xc8, 0x77, 0x8e, 0x64, 0x68, 0x80, 0x6e, 0xc1, 0x0a, 0x16, 0x92, 0x7a, 0x68, + 0xd9, 0xb8, 0xee, 0xb0, 0x84, 0x2e, 0x68, 0x4b, 0xd1, 0xc4, 0xa9, 0x65, 0xe3, 0x0f, 0x03, 0xa4, + 0xc0, 0xaa, 0xe1, 0xda, 0x9e, 0xd5, 0xd6, 0x13, 0xda, 0x79, 0xaa, 0xbd, 0x12, 0x9b, 0xe2, 0xfa, + 0xaf, 0xc1, 0x52, 0xe3, 0x92, 0xde, 0xda, 0x7d, 0xd7, 0xc0, 0x41, 0xc0, 0x6f, 0x74, 0x05, 0x6d, + 0x91, 0x8a, 0x4f, 0x84, 0x14, 0x6d, 0xc3, 0x8a, 0x8f, 0x0d, 0xd7, 0x37, 0xe3, 0xaa, 0x53, 0x54, + 0x75, 0x99, 0x4f, 0x44, 0xca, 0xf2, 0xef, 0xf2, 0xf0, 0x15, 0x8d, 0x0a, 0xa3, 0xad, 0x68, 0x38, + 0xe8, 0xb4, 
0xff, 0x1f, 0x19, 0x81, 0xde, 0x82, 0xb9, 0xe8, 0x99, 0x90, 0xc3, 0x5d, 0x1a, 0xe8, + 0x14, 0x4e, 0x85, 0x86, 0xd6, 0x53, 0x46, 0xdb, 0x30, 0x8d, 0x7d, 0xdf, 0xf5, 0x79, 0x7f, 0x91, + 0x56, 0x0d, 0xc8, 0xbd, 0x9f, 0xea, 0xa0, 0x33, 0xe8, 0x81, 0x4b, 0x4b, 0x73, 0xc0, 0x6f, 0xff, + 0xdb, 0x23, 0x53, 0x2a, 0xc9, 0xdd, 0x51, 0x4e, 0x5b, 0xc4, 0x09, 0xc9, 0x7e, 0x11, 0x66, 0x7c, + 0x8a, 0x85, 0xbc, 0x05, 0xaf, 0x0c, 0x01, 0x89, 0xe7, 0xc2, 0x16, 0xbc, 0x72, 0x88, 0xc3, 0xfb, + 0xed, 0x76, 0x9f, 0x42, 0x54, 0x9d, 0x7e, 0x5d, 0x80, 0xf2, 0x30, 0x0d, 0x5e, 0xa9, 0x30, 0xcc, + 0xb2, 0xe5, 0xc4, 0x89, 0x38, 0xce, 0xaa, 0x54, 0x23, 0xbc, 0x29, 0xfd, 0x91, 0x0a, 0xdf, 0xa5, + 0x5f, 0xe5, 0x61, 0xa9, 0x6f, 0xf2, 0x4b, 0x92, 0x3b, 0xed, 0x70, 0xe7, 0x9f, 0x05, 0x58, 0x12, + 0xcf, 0x8b, 0x35, 0xe6, 0x08, 0x5d, 0xc0, 0x12, 0xc1, 0x39, 0xfe, 0x62, 0xf3, 0xc6, 0xb8, 0x2f, + 0x3d, 0x82, 0xfb, 0xd2, 0x9d, 0x09, 0x2c, 0x18, 0x7b, 0x6f, 0x48, 0x08, 0x03, 0xd0, 0x6f, 0x11, + 0x7b, 0xd4, 0x19, 0xfd, 0x62, 0x9a, 0x78, 0xdf, 0x2d, 0x6d, 0x8f, 0xa5, 0xcb, 0x93, 0xce, 0x86, + 0x05, 0xb1, 0x41, 0xd2, 0xbf, 0xa1, 0xdb, 0xd9, 0xb1, 0xc6, 0xba, 0xcf, 0x92, 0x32, 0xae, 0x3a, + 0x5f, 0xee, 0x12, 0x96, 0x0f, 0x71, 0x98, 0x78, 0xbd, 0x41, 0x77, 0x26, 0x79, 0xe9, 0x61, 0xcb, + 0xee, 0x4c, 0xfe, 0x38, 0xb4, 0xf3, 0x87, 0x02, 0x6c, 0x0a, 0x7a, 0x63, 0xb7, 0x6e, 0x4e, 0xf4, + 0xcf, 0x24, 0x40, 0x83, 0x8f, 0x28, 0xe8, 0xee, 0xc4, 0xaf, 0x2e, 0x2c, 0xc0, 0xbd, 0x2b, 0xbe, + 0xd6, 0xa0, 0x9f, 0x4a, 0xb4, 0xb7, 0x49, 0x3e, 0x27, 0xa0, 0xdd, 0x49, 0x9f, 0x1f, 0x58, 0x14, + 0x77, 0xaf, 0xf6, 0x6a, 0x81, 0x7e, 0x0c, 0xcb, 0xfd, 0x77, 0x69, 0xf4, 0x66, 0xc6, 0x8e, 0x52, + 0xef, 0xea, 0xa5, 0xdd, 0x09, 0xad, 0x52, 0xb8, 0x8a, 0x5d, 0x14, 0x52, 0xb8, 0xea, 0xcd, 0x8e, + 0xc9, 0xd5, 0x40, 0x5b, 0x3d, 0x26, 0x57, 0x29, 0xdd, 0x35, 0xe7, 0x2a, 0x79, 0x03, 0xca, 0xe6, + 0x2a, 0xf5, 0x1e, 0x97, 0xcd, 0xd5, 0x90, 0x5b, 0x5b, 0xc4, 0x55, 0x0c, 0x89, 0x71, 0xb8, 0x1a, + 0xc4, 0x61, 0x77, 0x42, 0x2b, 0xce, 0xd5, 0xcf, 
0x25, 0x58, 0x17, 0x5c, 0xb1, 0xa6, 0x5e, 0xf0, + 0x14, 0xc0, 0x42, 0xbc, 0xd7, 0xcf, 0xa8, 0x9c, 0x29, 0x77, 0x8f, 0x8c, 0xca, 0x99, 0x76, 0x91, + 0xd8, 0xf9, 0xe5, 0x0c, 0x5c, 0xef, 0x75, 0x71, 0xb5, 0xd0, 0xf5, 0xa3, 0x33, 0x6e, 0xf3, 0x92, + 0x4a, 0xdb, 0x38, 0xa4, 0x8c, 0x7d, 0x0f, 0x60, 0xb1, 0xa8, 0x13, 0xde, 0x1b, 0x68, 0x7a, 0x0c, + 0x34, 0xe0, 0x19, 0xe9, 0x31, 0xac, 0xfb, 0xcf, 0x48, 0x8f, 0xa1, 0x7d, 0x3e, 0xe1, 0x20, 0xde, + 0x94, 0x67, 0x70, 0x90, 0x72, 0x25, 0xc8, 0xe0, 0x20, 0xad, 0xe3, 0x27, 0x40, 0xd7, 0xc6, 0x05, + 0xba, 0x36, 0x21, 0xd0, 0x83, 0x8d, 0x38, 0xfa, 0x58, 0x82, 0xf5, 0xd4, 0xf6, 0x0c, 0x7d, 0x33, + 0x23, 0xa5, 0x87, 0xf7, 0xbd, 0xa5, 0x7b, 0x57, 0x31, 0xe5, 0x01, 0x7d, 0x22, 0xc1, 0xf5, 0xf4, + 0xf6, 0x0c, 0xdd, 0xbb, 0x52, 0x4f, 0xc7, 0x42, 0x7a, 0xfb, 0x7f, 0xe8, 0x07, 0xf7, 0xdf, 0x7b, + 0xfa, 0xbc, 0x9c, 0xfb, 0xec, 0x79, 0x39, 0xf7, 0xf9, 0xf3, 0xb2, 0xf4, 0x93, 0x6e, 0x59, 0xfa, + 0x7d, 0xb7, 0x2c, 0xfd, 0xad, 0x5b, 0x96, 0x9e, 0x76, 0xcb, 0xd2, 0xbf, 0xba, 0x65, 0xe9, 0xdf, + 0xdd, 0x72, 0xee, 0xf3, 0x6e, 0x59, 0xfa, 0xc5, 0x8b, 0x72, 0xee, 0xe9, 0x8b, 0x72, 0xee, 0xb3, + 0x17, 0xe5, 0xdc, 0x77, 0xa1, 0xf7, 0x3f, 0x06, 0x8d, 0x19, 0xda, 0xcd, 0x7d, 0xe3, 0xbf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xa6, 0x68, 0x46, 0x22, 0x95, 0x20, 0x00, 0x00, +} + +func (this *SchemaRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SchemaRequest) + if !ok { + that2, ok := that.(SchemaRequest) if ok { that1 = &that2 } else { @@ -2325,19 +2524,16 @@ func (this *AgentUpdate_DataInfo) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.DataInfo.Equal(that1.DataInfo) { - return false - } return true } -func (this *AgentUpdatesResponse) Equal(that interface{}) bool { +func (this *SchemaResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*AgentUpdatesResponse) + that1, ok := that.(*SchemaResponse) if !ok { - that2, ok := 
that.(AgentUpdatesResponse) + that2, ok := that.(SchemaResponse) if ok { that1 = &that2 } else { @@ -2349,38 +2545,19 @@ func (this *AgentUpdatesResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.AgentUpdates) != len(that1.AgentUpdates) { - return false - } - for i := range this.AgentUpdates { - if !this.AgentUpdates[i].Equal(that1.AgentUpdates[i]) { - return false - } - } - if len(this.AgentSchemas) != len(that1.AgentSchemas) { - return false - } - for i := range this.AgentSchemas { - if !this.AgentSchemas[i].Equal(that1.AgentSchemas[i]) { - return false - } - } - if this.AgentSchemasUpdated != that1.AgentSchemasUpdated { - return false - } - if this.EndOfVersion != that1.EndOfVersion { + if !this.Schema.Equal(that1.Schema) { return false } return true } -func (this *WithPrefixKeyRequest) Equal(that interface{}) bool { +func (this *AgentInfoRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*WithPrefixKeyRequest) + that1, ok := that.(*AgentInfoRequest) if !ok { - that2, ok := that.(WithPrefixKeyRequest) + that2, ok := that.(AgentInfoRequest) if ok { that1 = &that2 } else { @@ -2392,22 +2569,16 @@ func (this *WithPrefixKeyRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Prefix != that1.Prefix { - return false - } - if this.Proto != that1.Proto { - return false - } return true } -func (this *WithPrefixKeyResponse) Equal(that interface{}) bool { +func (this *AgentInfoResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*WithPrefixKeyResponse) + that1, ok := that.(*AgentInfoResponse) if !ok { - that2, ok := that.(WithPrefixKeyResponse) + that2, ok := that.(AgentInfoResponse) if ok { that1 = &that2 } else { @@ -2419,24 +2590,24 @@ func (this *WithPrefixKeyResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Kvs) != len(that1.Kvs) { + if len(this.Info) != 
len(that1.Info) { return false } - for i := range this.Kvs { - if !this.Kvs[i].Equal(that1.Kvs[i]) { + for i := range this.Info { + if !this.Info[i].Equal(that1.Info[i]) { return false } } return true } -func (this *WithPrefixKeyResponse_KV) Equal(that interface{}) bool { +func (this *AgentMetadata) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*WithPrefixKeyResponse_KV) + that1, ok := that.(*AgentMetadata) if !ok { - that2, ok := that.(WithPrefixKeyResponse_KV) + that2, ok := that.(AgentMetadata) if ok { that1 = &that2 } else { @@ -2448,22 +2619,25 @@ func (this *WithPrefixKeyResponse_KV) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Key != that1.Key { + if !this.Agent.Equal(that1.Agent) { return false } - if !bytes.Equal(this.Value, that1.Value) { + if !this.Status.Equal(that1.Status) { + return false + } + if !this.CarnotInfo.Equal(that1.CarnotInfo) { return false } return true } -func (this *RegisterTracepointRequest) Equal(that interface{}) bool { +func (this *AgentUpdatesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointRequest) + that1, ok := that.(*AgentUpdatesRequest) if !ok { - that2, ok := that.(RegisterTracepointRequest) + that2, ok := that.(AgentUpdatesRequest) if ok { that1 = &that2 } else { @@ -2475,24 +2649,22 @@ func (this *RegisterTracepointRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Requests) != len(that1.Requests) { + if !this.MaxUpdateInterval.Equal(that1.MaxUpdateInterval) { return false } - for i := range this.Requests { - if !this.Requests[i].Equal(that1.Requests[i]) { - return false - } + if this.MaxUpdatesPerResponse != that1.MaxUpdatesPerResponse { + return false } return true } -func (this *RegisterTracepointRequest_TracepointRequest) Equal(that interface{}) bool { +func (this *AgentUpdate) Equal(that interface{}) bool { if that == nil { return 
this == nil } - that1, ok := that.(*RegisterTracepointRequest_TracepointRequest) + that1, ok := that.(*AgentUpdate) if !ok { - that2, ok := that.(RegisterTracepointRequest_TracepointRequest) + that2, ok := that.(AgentUpdate) if ok { that1 = &that2 } else { @@ -2504,25 +2676,28 @@ func (this *RegisterTracepointRequest_TracepointRequest) Equal(that interface{}) } else if this == nil { return false } - if !this.TracepointDeployment.Equal(that1.TracepointDeployment) { + if !this.AgentID.Equal(that1.AgentID) { return false } - if this.Name != that1.Name { + if that1.Update == nil { + if this.Update != nil { + return false + } + } else if this.Update == nil { return false - } - if !this.TTL.Equal(that1.TTL) { + } else if !this.Update.Equal(that1.Update) { return false } return true } -func (this *RegisterTracepointResponse) Equal(that interface{}) bool { +func (this *AgentUpdate_Deleted) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointResponse) + that1, ok := that.(*AgentUpdate_Deleted) if !ok { - that2, ok := that.(RegisterTracepointResponse) + that2, ok := that.(AgentUpdate_Deleted) if ok { that1 = &that2 } else { @@ -2534,27 +2709,19 @@ func (this *RegisterTracepointResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Tracepoints) != len(that1.Tracepoints) { - return false - } - for i := range this.Tracepoints { - if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { - return false - } - } - if !this.Status.Equal(that1.Status) { + if this.Deleted != that1.Deleted { return false } return true } -func (this *RegisterTracepointResponse_TracepointStatus) Equal(that interface{}) bool { +func (this *AgentUpdate_Agent) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointResponse_TracepointStatus) + that1, ok := that.(*AgentUpdate_Agent) if !ok { - that2, ok := that.(RegisterTracepointResponse_TracepointStatus) + that2, 
ok := that.(AgentUpdate_Agent) if ok { that1 = &that2 } else { @@ -2566,25 +2733,19 @@ func (this *RegisterTracepointResponse_TracepointStatus) Equal(that interface{}) } else if this == nil { return false } - if !this.Status.Equal(that1.Status) { - return false - } - if !this.ID.Equal(that1.ID) { - return false - } - if this.Name != that1.Name { + if !this.Agent.Equal(that1.Agent) { return false } return true } -func (this *GetTracepointInfoRequest) Equal(that interface{}) bool { +func (this *AgentUpdate_DataInfo) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetTracepointInfoRequest) + that1, ok := that.(*AgentUpdate_DataInfo) if !ok { - that2, ok := that.(GetTracepointInfoRequest) + that2, ok := that.(AgentUpdate_DataInfo) if ok { that1 = &that2 } else { @@ -2596,24 +2757,19 @@ func (this *GetTracepointInfoRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.IDs) != len(that1.IDs) { + if !this.DataInfo.Equal(that1.DataInfo) { return false } - for i := range this.IDs { - if !this.IDs[i].Equal(that1.IDs[i]) { - return false - } - } return true } -func (this *GetTracepointInfoResponse) Equal(that interface{}) bool { +func (this *AgentUpdatesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetTracepointInfoResponse) + that1, ok := that.(*AgentUpdatesResponse) if !ok { - that2, ok := that.(GetTracepointInfoResponse) + that2, ok := that.(AgentUpdatesResponse) if ok { that1 = &that2 } else { @@ -2625,24 +2781,38 @@ func (this *GetTracepointInfoResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Tracepoints) != len(that1.Tracepoints) { + if len(this.AgentUpdates) != len(that1.AgentUpdates) { return false } - for i := range this.Tracepoints { - if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { + for i := range this.AgentUpdates { + if !this.AgentUpdates[i].Equal(that1.AgentUpdates[i]) { + 
return false + } + } + if len(this.AgentSchemas) != len(that1.AgentSchemas) { + return false + } + for i := range this.AgentSchemas { + if !this.AgentSchemas[i].Equal(that1.AgentSchemas[i]) { return false } } + if this.AgentSchemasUpdated != that1.AgentSchemasUpdated { + return false + } + if this.EndOfVersion != that1.EndOfVersion { + return false + } return true } -func (this *GetTracepointInfoResponse_TracepointState) Equal(that interface{}) bool { +func (this *WithPrefixKeyRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetTracepointInfoResponse_TracepointState) + that1, ok := that.(*WithPrefixKeyRequest) if !ok { - that2, ok := that.(GetTracepointInfoResponse_TracepointState) + that2, ok := that.(WithPrefixKeyRequest) if ok { that1 = &that2 } else { @@ -2654,44 +2824,22 @@ func (this *GetTracepointInfoResponse_TracepointState) Equal(that interface{}) b } else if this == nil { return false } - if !this.ID.Equal(that1.ID) { - return false - } - if this.State != that1.State { - return false - } - if len(this.Statuses) != len(that1.Statuses) { - return false - } - for i := range this.Statuses { - if !this.Statuses[i].Equal(that1.Statuses[i]) { - return false - } - } - if this.Name != that1.Name { - return false - } - if this.ExpectedState != that1.ExpectedState { + if this.Prefix != that1.Prefix { return false } - if len(this.SchemaNames) != len(that1.SchemaNames) { + if this.Proto != that1.Proto { return false } - for i := range this.SchemaNames { - if this.SchemaNames[i] != that1.SchemaNames[i] { - return false - } - } return true } -func (this *RemoveTracepointRequest) Equal(that interface{}) bool { +func (this *WithPrefixKeyResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RemoveTracepointRequest) + that1, ok := that.(*WithPrefixKeyResponse) if !ok { - that2, ok := that.(RemoveTracepointRequest) + that2, ok := that.(WithPrefixKeyResponse) if ok { that1 = 
&that2 } else { @@ -2703,24 +2851,24 @@ func (this *RemoveTracepointRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Names) != len(that1.Names) { + if len(this.Kvs) != len(that1.Kvs) { return false } - for i := range this.Names { - if this.Names[i] != that1.Names[i] { + for i := range this.Kvs { + if !this.Kvs[i].Equal(that1.Kvs[i]) { return false } } return true } -func (this *RemoveTracepointResponse) Equal(that interface{}) bool { +func (this *WithPrefixKeyResponse_KV) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RemoveTracepointResponse) + that1, ok := that.(*WithPrefixKeyResponse_KV) if !ok { - that2, ok := that.(RemoveTracepointResponse) + that2, ok := that.(WithPrefixKeyResponse_KV) if ok { that1 = &that2 } else { @@ -2732,19 +2880,22 @@ func (this *RemoveTracepointResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Status.Equal(that1.Status) { + if this.Key != that1.Key { + return false + } + if !bytes.Equal(this.Value, that1.Value) { return false } return true } -func (this *UpdateConfigRequest) Equal(that interface{}) bool { +func (this *RegisterFileSourceRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UpdateConfigRequest) + that1, ok := that.(*RegisterFileSourceRequest) if !ok { - that2, ok := that.(UpdateConfigRequest) + that2, ok := that.(RegisterFileSourceRequest) if ok { that1 = &that2 } else { @@ -2756,25 +2907,24 @@ func (this *UpdateConfigRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Key != that1.Key { - return false - } - if this.Value != that1.Value { + if len(this.Requests) != len(that1.Requests) { return false } - if this.AgentPodName != that1.AgentPodName { - return false + for i := range this.Requests { + if !this.Requests[i].Equal(that1.Requests[i]) { + return false + } } return true } -func (this *UpdateConfigResponse) 
Equal(that interface{}) bool { +func (this *RegisterFileSourceResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UpdateConfigResponse) + that1, ok := that.(*RegisterFileSourceResponse) if !ok { - that2, ok := that.(UpdateConfigResponse) + that2, ok := that.(RegisterFileSourceResponse) if ok { that1 = &that2 } else { @@ -2786,19 +2936,27 @@ func (this *UpdateConfigResponse) Equal(that interface{}) bool { } else if this == nil { return false } + if len(this.FileSources) != len(that1.FileSources) { + return false + } + for i := range this.FileSources { + if !this.FileSources[i].Equal(that1.FileSources[i]) { + return false + } + } if !this.Status.Equal(that1.Status) { return false } return true } -func (this *GetScriptsRequest) Equal(that interface{}) bool { +func (this *RegisterFileSourceResponse_FileSourceStatus) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetScriptsRequest) + that1, ok := that.(*RegisterFileSourceResponse_FileSourceStatus) if !ok { - that2, ok := that.(GetScriptsRequest) + that2, ok := that.(RegisterFileSourceResponse_FileSourceStatus) if ok { that1 = &that2 } else { @@ -2810,16 +2968,25 @@ func (this *GetScriptsRequest) Equal(that interface{}) bool { } else if this == nil { return false } + if !this.Status.Equal(that1.Status) { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + if this.Name != that1.Name { + return false + } return true } -func (this *GetScriptsResponse) Equal(that interface{}) bool { +func (this *GetFileSourceInfoRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetScriptsResponse) + that1, ok := that.(*GetFileSourceInfoRequest) if !ok { - that2, ok := that.(GetScriptsResponse) + that2, ok := that.(GetFileSourceInfoRequest) if ok { that1 = &that2 } else { @@ -2831,24 +2998,24 @@ func (this *GetScriptsResponse) Equal(that interface{}) bool { } else if this == nil { 
return false } - if len(this.Scripts) != len(that1.Scripts) { + if len(this.IDs) != len(that1.IDs) { return false } - for i := range this.Scripts { - if !this.Scripts[i].Equal(that1.Scripts[i]) { + for i := range this.IDs { + if !this.IDs[i].Equal(that1.IDs[i]) { return false } } return true } -func (this *AddOrUpdateScriptRequest) Equal(that interface{}) bool { +func (this *GetFileSourceInfoResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*AddOrUpdateScriptRequest) + that1, ok := that.(*GetFileSourceInfoResponse) if !ok { - that2, ok := that.(AddOrUpdateScriptRequest) + that2, ok := that.(GetFileSourceInfoResponse) if ok { that1 = &that2 } else { @@ -2860,19 +3027,24 @@ func (this *AddOrUpdateScriptRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Script.Equal(that1.Script) { + if len(this.FileSources) != len(that1.FileSources) { return false } + for i := range this.FileSources { + if !this.FileSources[i].Equal(that1.FileSources[i]) { + return false + } + } return true } -func (this *AddOrUpdateScriptResponse) Equal(that interface{}) bool { +func (this *GetFileSourceInfoResponse_FileSourceState) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*AddOrUpdateScriptResponse) + that1, ok := that.(*GetFileSourceInfoResponse_FileSourceState) if !ok { - that2, ok := that.(AddOrUpdateScriptResponse) + that2, ok := that.(GetFileSourceInfoResponse_FileSourceState) if ok { that1 = &that2 } else { @@ -2884,16 +3056,44 @@ func (this *AddOrUpdateScriptResponse) Equal(that interface{}) bool { } else if this == nil { return false } + if !this.ID.Equal(that1.ID) { + return false + } + if this.State != that1.State { + return false + } + if len(this.Statuses) != len(that1.Statuses) { + return false + } + for i := range this.Statuses { + if !this.Statuses[i].Equal(that1.Statuses[i]) { + return false + } + } + if this.Name != that1.Name { + return false + 
} + if this.ExpectedState != that1.ExpectedState { + return false + } + if len(this.SchemaNames) != len(that1.SchemaNames) { + return false + } + for i := range this.SchemaNames { + if this.SchemaNames[i] != that1.SchemaNames[i] { + return false + } + } return true } -func (this *DeleteScriptRequest) Equal(that interface{}) bool { +func (this *RemoveFileSourceRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*DeleteScriptRequest) + that1, ok := that.(*RemoveFileSourceRequest) if !ok { - that2, ok := that.(DeleteScriptRequest) + that2, ok := that.(RemoveFileSourceRequest) if ok { that1 = &that2 } else { @@ -2905,19 +3105,24 @@ func (this *DeleteScriptRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.ScriptID.Equal(that1.ScriptID) { + if len(this.Names) != len(that1.Names) { return false } + for i := range this.Names { + if this.Names[i] != that1.Names[i] { + return false + } + } return true } -func (this *DeleteScriptResponse) Equal(that interface{}) bool { +func (this *RemoveFileSourceResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*DeleteScriptResponse) + that1, ok := that.(*RemoveFileSourceResponse) if !ok { - that2, ok := that.(DeleteScriptResponse) + that2, ok := that.(RemoveFileSourceResponse) if ok { that1 = &that2 } else { @@ -2929,16 +3134,19 @@ func (this *DeleteScriptResponse) Equal(that interface{}) bool { } else if this == nil { return false } + if !this.Status.Equal(that1.Status) { + return false + } return true } -func (this *SetScriptsRequest) Equal(that interface{}) bool { +func (this *RegisterTracepointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SetScriptsRequest) + that1, ok := that.(*RegisterTracepointRequest) if !ok { - that2, ok := that.(SetScriptsRequest) + that2, ok := that.(RegisterTracepointRequest) if ok { that1 = &that2 } else { @@ -2950,24 
+3158,24 @@ func (this *SetScriptsRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Scripts) != len(that1.Scripts) { + if len(this.Requests) != len(that1.Requests) { return false } - for i := range this.Scripts { - if !this.Scripts[i].Equal(that1.Scripts[i]) { + for i := range this.Requests { + if !this.Requests[i].Equal(that1.Requests[i]) { return false } } return true } -func (this *SetScriptsResponse) Equal(that interface{}) bool { +func (this *RegisterTracepointRequest_TracepointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SetScriptsResponse) + that1, ok := that.(*RegisterTracepointRequest_TracepointRequest) if !ok { - that2, ok := that.(SetScriptsResponse) + that2, ok := that.(RegisterTracepointRequest_TracepointRequest) if ok { that1 = &that2 } else { @@ -2979,16 +3187,25 @@ func (this *SetScriptsResponse) Equal(that interface{}) bool { } else if this == nil { return false } - return true -} -func (this *ExecutionStats) Equal(that interface{}) bool { - if that == nil { - return this == nil + if !this.TracepointDeployment.Equal(that1.TracepointDeployment) { + return false + } + if this.Name != that1.Name { + return false + } + if !this.TTL.Equal(that1.TTL) { + return false + } + return true +} +func (this *RegisterTracepointResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - that1, ok := that.(*ExecutionStats) + that1, ok := that.(*RegisterTracepointResponse) if !ok { - that2, ok := that.(ExecutionStats) + that2, ok := that.(RegisterTracepointResponse) if ok { that1 = &that2 } else { @@ -3000,28 +3217,27 @@ func (this *ExecutionStats) Equal(that interface{}) bool { } else if this == nil { return false } - if this.ExecutionTimeNs != that1.ExecutionTimeNs { - return false - } - if this.CompilationTimeNs != that1.CompilationTimeNs { + if len(this.Tracepoints) != len(that1.Tracepoints) { return false } - if this.BytesProcessed != 
that1.BytesProcessed { - return false + for i := range this.Tracepoints { + if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { + return false + } } - if this.RecordsProcessed != that1.RecordsProcessed { + if !this.Status.Equal(that1.Status) { return false } return true } -func (this *RecordExecutionResultRequest) Equal(that interface{}) bool { +func (this *RegisterTracepointResponse_TracepointStatus) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RecordExecutionResultRequest) + that1, ok := that.(*RegisterTracepointResponse_TracepointStatus) if !ok { - that2, ok := that.(RecordExecutionResultRequest) + that2, ok := that.(RegisterTracepointResponse_TracepointStatus) if ok { that1 = &that2 } else { @@ -3033,31 +3249,54 @@ func (this *RecordExecutionResultRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.ScriptID.Equal(that1.ScriptID) { + if !this.Status.Equal(that1.Status) { return false } - if !this.Timestamp.Equal(that1.Timestamp) { + if !this.ID.Equal(that1.ID) { return false } - if that1.Result == nil { - if this.Result != nil { + if this.Name != that1.Name { + return false + } + return true +} +func (this *GetTracepointInfoRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetTracepointInfoRequest) + if !ok { + that2, ok := that.(GetTracepointInfoRequest) + if ok { + that1 = &that2 + } else { return false } - } else if this.Result == nil { + } + if that1 == nil { + return this == nil + } else if this == nil { return false - } else if !this.Result.Equal(that1.Result) { + } + if len(this.IDs) != len(that1.IDs) { return false } + for i := range this.IDs { + if !this.IDs[i].Equal(that1.IDs[i]) { + return false + } + } return true } -func (this *RecordExecutionResultRequest_Error) Equal(that interface{}) bool { +func (this *GetTracepointInfoResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - 
that1, ok := that.(*RecordExecutionResultRequest_Error) + that1, ok := that.(*GetTracepointInfoResponse) if !ok { - that2, ok := that.(RecordExecutionResultRequest_Error) + that2, ok := that.(GetTracepointInfoResponse) if ok { that1 = &that2 } else { @@ -3069,19 +3308,24 @@ func (this *RecordExecutionResultRequest_Error) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Error.Equal(that1.Error) { + if len(this.Tracepoints) != len(that1.Tracepoints) { return false } + for i := range this.Tracepoints { + if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { + return false + } + } return true } -func (this *RecordExecutionResultRequest_ExecutionStats) Equal(that interface{}) bool { +func (this *GetTracepointInfoResponse_TracepointState) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RecordExecutionResultRequest_ExecutionStats) + that1, ok := that.(*GetTracepointInfoResponse_TracepointState) if !ok { - that2, ok := that.(RecordExecutionResultRequest_ExecutionStats) + that2, ok := that.(GetTracepointInfoResponse_TracepointState) if ok { that1 = &that2 } else { @@ -3093,19 +3337,44 @@ func (this *RecordExecutionResultRequest_ExecutionStats) Equal(that interface{}) } else if this == nil { return false } - if !this.ExecutionStats.Equal(that1.ExecutionStats) { + if !this.ID.Equal(that1.ID) { + return false + } + if this.State != that1.State { return false } + if len(this.Statuses) != len(that1.Statuses) { + return false + } + for i := range this.Statuses { + if !this.Statuses[i].Equal(that1.Statuses[i]) { + return false + } + } + if this.Name != that1.Name { + return false + } + if this.ExpectedState != that1.ExpectedState { + return false + } + if len(this.SchemaNames) != len(that1.SchemaNames) { + return false + } + for i := range this.SchemaNames { + if this.SchemaNames[i] != that1.SchemaNames[i] { + return false + } + } return true } -func (this *RecordExecutionResultResponse) Equal(that 
interface{}) bool { +func (this *RemoveTracepointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RecordExecutionResultResponse) + that1, ok := that.(*RemoveTracepointRequest) if !ok { - that2, ok := that.(RecordExecutionResultResponse) + that2, ok := that.(RemoveTracepointRequest) if ok { that1 = &that2 } else { @@ -3117,16 +3386,24 @@ func (this *RecordExecutionResultResponse) Equal(that interface{}) bool { } else if this == nil { return false } + if len(this.Names) != len(that1.Names) { + return false + } + for i := range this.Names { + if this.Names[i] != that1.Names[i] { + return false + } + } return true } -func (this *GetAllExecutionResultsRequest) Equal(that interface{}) bool { +func (this *RemoveTracepointResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetAllExecutionResultsRequest) + that1, ok := that.(*RemoveTracepointResponse) if !ok { - that2, ok := that.(GetAllExecutionResultsRequest) + that2, ok := that.(RemoveTracepointResponse) if ok { that1 = &that2 } else { @@ -3138,16 +3415,19 @@ func (this *GetAllExecutionResultsRequest) Equal(that interface{}) bool { } else if this == nil { return false } + if !this.Status.Equal(that1.Status) { + return false + } return true } -func (this *GetAllExecutionResultsResponse) Equal(that interface{}) bool { +func (this *UpdateConfigRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetAllExecutionResultsResponse) + that1, ok := that.(*UpdateConfigRequest) if !ok { - that2, ok := that.(GetAllExecutionResultsResponse) + that2, ok := that.(UpdateConfigRequest) if ok { that1 = &that2 } else { @@ -3159,24 +3439,25 @@ func (this *GetAllExecutionResultsResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Results) != len(that1.Results) { + if this.Key != that1.Key { return false } - for i := range this.Results { - if 
!this.Results[i].Equal(that1.Results[i]) { - return false - } + if this.Value != that1.Value { + return false + } + if this.AgentPodName != that1.AgentPodName { + return false } return true } -func (this *GetAllExecutionResultsResponse_ExecutionResult) Equal(that interface{}) bool { +func (this *UpdateConfigResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult) + that1, ok := that.(*UpdateConfigResponse) if !ok { - that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult) + that2, ok := that.(UpdateConfigResponse) if ok { that1 = &that2 } else { @@ -3188,31 +3469,40 @@ func (this *GetAllExecutionResultsResponse_ExecutionResult) Equal(that interface } else if this == nil { return false } - if !this.ScriptID.Equal(that1.ScriptID) { + if !this.Status.Equal(that1.Status) { return false } - if !this.Timestamp.Equal(that1.Timestamp) { - return false + return true +} +func (this *GetScriptsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - if that1.Result == nil { - if this.Result != nil { + + that1, ok := that.(*GetScriptsRequest) + if !ok { + that2, ok := that.(GetScriptsRequest) + if ok { + that1 = &that2 + } else { return false } - } else if this.Result == nil { - return false - } else if !this.Result.Equal(that1.Result) { + } + if that1 == nil { + return this == nil + } else if this == nil { return false } return true } -func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) Equal(that interface{}) bool { +func (this *GetScriptsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult_Error) + that1, ok := that.(*GetScriptsResponse) if !ok { - that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult_Error) + that2, ok := that.(GetScriptsResponse) if ok { that1 = &that2 } else { @@ -3224,19 +3514,24 @@ func (this 
*GetAllExecutionResultsResponse_ExecutionResult_Error) Equal(that int } else if this == nil { return false } - if !this.Error.Equal(that1.Error) { + if len(this.Scripts) != len(that1.Scripts) { return false } + for i := range this.Scripts { + if !this.Scripts[i].Equal(that1.Scripts[i]) { + return false + } + } return true } -func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) Equal(that interface{}) bool { +func (this *AddOrUpdateScriptRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) + that1, ok := that.(*AddOrUpdateScriptRequest) if !ok { - that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) + that2, ok := that.(AddOrUpdateScriptRequest) if ok { that1 = &that2 } else { @@ -3248,1659 +3543,1871 @@ func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) Equal } else if this == nil { return false } - if !this.ExecutionStats.Equal(that1.ExecutionStats) { + if !this.Script.Equal(that1.Script) { return false } return true } -func (this *SchemaRequest) GoString() string { - if this == nil { - return "nil" +func (this *AddOrUpdateScriptResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.SchemaRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SchemaResponse) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*AddOrUpdateScriptResponse) + if !ok { + that2, ok := that.(AddOrUpdateScriptResponse) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.SchemaResponse{") - if this.Schema != nil { - s = append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") + 
return true } -func (this *AgentInfoRequest) GoString() string { - if this == nil { - return "nil" +func (this *DeleteScriptRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.AgentInfoRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *AgentInfoResponse) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*DeleteScriptRequest) + if !ok { + that2, ok := that.(DeleteScriptRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.AgentInfoResponse{") - if this.Info != nil { - s = append(s, "Info: "+fmt.Sprintf("%#v", this.Info)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *AgentMetadata) GoString() string { - if this == nil { - return "nil" + if !this.ScriptID.Equal(that1.ScriptID) { + return false } - s := make([]string, 0, 7) - s = append(s, "&metadatapb.AgentMetadata{") - if this.Agent != nil { - s = append(s, "Agent: "+fmt.Sprintf("%#v", this.Agent)+",\n") + return true +} +func (this *DeleteScriptResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + + that1, ok := that.(*DeleteScriptResponse) + if !ok { + that2, ok := that.(DeleteScriptResponse) + if ok { + that1 = &that2 + } else { + return false + } } - if this.CarnotInfo != nil { - s = append(s, "CarnotInfo: "+fmt.Sprintf("%#v", this.CarnotInfo)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *AgentUpdatesRequest) GoString() string { - if this == nil { - return "nil" +func (this *SetScriptsRequest) Equal(that interface{}) bool { + if that == nil { + return this 
== nil } - s := make([]string, 0, 6) - s = append(s, "&metadatapb.AgentUpdatesRequest{") - if this.MaxUpdateInterval != nil { - s = append(s, "MaxUpdateInterval: "+fmt.Sprintf("%#v", this.MaxUpdateInterval)+",\n") + + that1, ok := that.(*SetScriptsRequest) + if !ok { + that2, ok := that.(SetScriptsRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s = append(s, "MaxUpdatesPerResponse: "+fmt.Sprintf("%#v", this.MaxUpdatesPerResponse)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *AgentUpdate) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 8) - s = append(s, "&metadatapb.AgentUpdate{") - if this.AgentID != nil { - s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + if len(this.Scripts) != len(that1.Scripts) { + return false } - if this.Update != nil { - s = append(s, "Update: "+fmt.Sprintf("%#v", this.Update)+",\n") + for i := range this.Scripts { + if !this.Scripts[i].Equal(that1.Scripts[i]) { + return false + } } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *AgentUpdate_Deleted) GoString() string { - if this == nil { - return "nil" +func (this *SetScriptsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := strings.Join([]string{`&metadatapb.AgentUpdate_Deleted{` + - `Deleted:` + fmt.Sprintf("%#v", this.Deleted) + `}`}, ", ") - return s -} -func (this *AgentUpdate_Agent) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*SetScriptsResponse) + if !ok { + that2, ok := that.(SetScriptsResponse) + if ok { + that1 = &that2 + } else { + return false + } } - s := strings.Join([]string{`&metadatapb.AgentUpdate_Agent{` + - `Agent:` + fmt.Sprintf("%#v", this.Agent) + `}`}, ", ") - return s -} -func (this *AgentUpdate_DataInfo) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + 
return this == nil + } else if this == nil { + return false } - s := strings.Join([]string{`&metadatapb.AgentUpdate_DataInfo{` + - `DataInfo:` + fmt.Sprintf("%#v", this.DataInfo) + `}`}, ", ") - return s + return true } -func (this *AgentUpdatesResponse) GoString() string { - if this == nil { - return "nil" +func (this *ExecutionStats) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 8) - s = append(s, "&metadatapb.AgentUpdatesResponse{") - if this.AgentUpdates != nil { - s = append(s, "AgentUpdates: "+fmt.Sprintf("%#v", this.AgentUpdates)+",\n") + + that1, ok := that.(*ExecutionStats) + if !ok { + that2, ok := that.(ExecutionStats) + if ok { + that1 = &that2 + } else { + return false + } } - if this.AgentSchemas != nil { - s = append(s, "AgentSchemas: "+fmt.Sprintf("%#v", this.AgentSchemas)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "AgentSchemasUpdated: "+fmt.Sprintf("%#v", this.AgentSchemasUpdated)+",\n") - s = append(s, "EndOfVersion: "+fmt.Sprintf("%#v", this.EndOfVersion)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WithPrefixKeyRequest) GoString() string { - if this == nil { - return "nil" + if this.ExecutionTimeNs != that1.ExecutionTimeNs { + return false } - s := make([]string, 0, 6) - s = append(s, "&metadatapb.WithPrefixKeyRequest{") - s = append(s, "Prefix: "+fmt.Sprintf("%#v", this.Prefix)+",\n") - s = append(s, "Proto: "+fmt.Sprintf("%#v", this.Proto)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WithPrefixKeyResponse) GoString() string { - if this == nil { - return "nil" + if this.CompilationTimeNs != that1.CompilationTimeNs { + return false } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.WithPrefixKeyResponse{") - if this.Kvs != nil { - s = append(s, "Kvs: "+fmt.Sprintf("%#v", this.Kvs)+",\n") + if this.BytesProcessed != that1.BytesProcessed { + return false } - s = 
append(s, "}") - return strings.Join(s, "") -} -func (this *WithPrefixKeyResponse_KV) GoString() string { - if this == nil { - return "nil" + if this.RecordsProcessed != that1.RecordsProcessed { + return false } - s := make([]string, 0, 6) - s = append(s, "&metadatapb.WithPrefixKeyResponse_KV{") - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *RegisterTracepointRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.RegisterTracepointRequest{") - if this.Requests != nil { - s = append(s, "Requests: "+fmt.Sprintf("%#v", this.Requests)+",\n") +func (this *RecordExecutionResultRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RegisterTracepointRequest_TracepointRequest) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*RecordExecutionResultRequest) + if !ok { + that2, ok := that.(RecordExecutionResultRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 7) - s = append(s, "&metadatapb.RegisterTracepointRequest_TracepointRequest{") - if this.TracepointDeployment != nil { - s = append(s, "TracepointDeployment: "+fmt.Sprintf("%#v", this.TracepointDeployment)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - if this.TTL != nil { - s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") + if !this.ScriptID.Equal(that1.ScriptID) { + return false } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RegisterTracepointResponse) GoString() string { - if this == nil { - return "nil" + if !this.Timestamp.Equal(that1.Timestamp) { + return false } - s := make([]string, 0, 6) - s = 
append(s, "&metadatapb.RegisterTracepointResponse{") - if this.Tracepoints != nil { - s = append(s, "Tracepoints: "+fmt.Sprintf("%#v", this.Tracepoints)+",\n") - } - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + if that1.Result == nil { + if this.Result != nil { + return false + } + } else if this.Result == nil { + return false + } else if !this.Result.Equal(that1.Result) { + return false } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *RegisterTracepointResponse_TracepointStatus) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&metadatapb.RegisterTracepointResponse_TracepointStatus{") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") +func (this *RecordExecutionResultRequest_Error) Equal(that interface{}) bool { + if that == nil { + return this == nil } - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + + that1, ok := that.(*RecordExecutionResultRequest_Error) + if !ok { + that2, ok := that.(RecordExecutionResultRequest_Error) + if ok { + that1 = &that2 + } else { + return false + } } - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetTracepointInfoRequest) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.GetTracepointInfoRequest{") - if this.IDs != nil { - s = append(s, "IDs: "+fmt.Sprintf("%#v", this.IDs)+",\n") + if !this.Error.Equal(that1.Error) { + return false } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *GetTracepointInfoResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.GetTracepointInfoResponse{") - if this.Tracepoints != 
nil { - s = append(s, "Tracepoints: "+fmt.Sprintf("%#v", this.Tracepoints)+",\n") +func (this *RecordExecutionResultRequest_ExecutionStats) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetTracepointInfoResponse_TracepointState) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*RecordExecutionResultRequest_ExecutionStats) + if !ok { + that2, ok := that.(RecordExecutionResultRequest_ExecutionStats) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 10) - s = append(s, "&metadatapb.GetTracepointInfoResponse_TracepointState{") - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - if this.Statuses != nil { - s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") + if !this.ExecutionStats.Equal(that1.ExecutionStats) { + return false } - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "ExpectedState: "+fmt.Sprintf("%#v", this.ExpectedState)+",\n") - s = append(s, "SchemaNames: "+fmt.Sprintf("%#v", this.SchemaNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *RemoveTracepointRequest) GoString() string { - if this == nil { - return "nil" +func (this *RecordExecutionResultResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.RemoveTracepointRequest{") - s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RemoveTracepointResponse) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*RecordExecutionResultResponse) + if !ok { + that2, ok := that.(RecordExecutionResultResponse) + if ok { + 
that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.RemoveTracepointResponse{") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *UpdateConfigRequest) GoString() string { - if this == nil { - return "nil" +func (this *GetAllExecutionResultsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 7) - s = append(s, "&metadatapb.UpdateConfigRequest{") - s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "AgentPodName: "+fmt.Sprintf("%#v", this.AgentPodName)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UpdateConfigResponse) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*GetAllExecutionResultsRequest) + if !ok { + that2, ok := that.(GetAllExecutionResultsRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.UpdateConfigResponse{") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *GetScriptsRequest) GoString() string { - if this == nil { - return "nil" +func (this *GetAllExecutionResultsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.GetScriptsRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetScriptsResponse) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*GetAllExecutionResultsResponse) + if !ok { + 
that2, ok := that.(GetAllExecutionResultsResponse) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.GetScriptsResponse{") - keysForScripts := make([]string, 0, len(this.Scripts)) - for k, _ := range this.Scripts { - keysForScripts = append(keysForScripts, k) + if that1 == nil { + return this == nil + } else if this == nil { + return false } - github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) - mapStringForScripts := "map[string]*cvmsgspb.CronScript{" - for _, k := range keysForScripts { - mapStringForScripts += fmt.Sprintf("%#v: %#v,", k, this.Scripts[k]) + if len(this.Results) != len(that1.Results) { + return false } - mapStringForScripts += "}" - if this.Scripts != nil { - s = append(s, "Scripts: "+mapStringForScripts+",\n") + for i := range this.Results { + if !this.Results[i].Equal(that1.Results[i]) { + return false + } } - s = append(s, "}") - return strings.Join(s, "") + return true } -func (this *AddOrUpdateScriptRequest) GoString() string { - if this == nil { - return "nil" +func (this *GetAllExecutionResultsResponse_ExecutionResult) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.AddOrUpdateScriptRequest{") - if this.Script != nil { - s = append(s, "Script: "+fmt.Sprintf("%#v", this.Script)+",\n") + + that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult) + if !ok { + that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult) + if ok { + that1 = &that2 + } else { + return false + } } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *AddOrUpdateScriptResponse) GoString() string { - if this == nil { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.AddOrUpdateScriptResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DeleteScriptRequest) 
GoString() string { + if !this.ScriptID.Equal(that1.ScriptID) { + return false + } + if !this.Timestamp.Equal(that1.Timestamp) { + return false + } + if that1.Result == nil { + if this.Result != nil { + return false + } + } else if this.Result == nil { + return false + } else if !this.Result.Equal(that1.Result) { + return false + } + return true +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult_Error) + if !ok { + that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult_Error) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) + if !ok { + that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ExecutionStats.Equal(that1.ExecutionStats) { + return false + } + return true +} +func (this *SchemaRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&metadatapb.SchemaRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SchemaResponse) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) - s = append(s, "&metadatapb.DeleteScriptRequest{") - if this.ScriptID != nil { - s = append(s, "ScriptID: "+fmt.Sprintf("%#v", this.ScriptID)+",\n") + s = append(s, "&metadatapb.SchemaResponse{") + if this.Schema != nil { + s = 
append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *DeleteScriptResponse) GoString() string { +func (this *AgentInfoRequest) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 4) - s = append(s, "&metadatapb.DeleteScriptResponse{") + s = append(s, "&metadatapb.AgentInfoRequest{") s = append(s, "}") return strings.Join(s, "") } -func (this *SetScriptsRequest) GoString() string { +func (this *AgentInfoResponse) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) - s = append(s, "&metadatapb.SetScriptsRequest{") - keysForScripts := make([]string, 0, len(this.Scripts)) - for k, _ := range this.Scripts { - keysForScripts = append(keysForScripts, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) - mapStringForScripts := "map[string]*cvmsgspb.CronScript{" - for _, k := range keysForScripts { - mapStringForScripts += fmt.Sprintf("%#v: %#v,", k, this.Scripts[k]) - } - mapStringForScripts += "}" - if this.Scripts != nil { - s = append(s, "Scripts: "+mapStringForScripts+",\n") + s = append(s, "&metadatapb.AgentInfoResponse{") + if this.Info != nil { + s = append(s, "Info: "+fmt.Sprintf("%#v", this.Info)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *SetScriptsResponse) GoString() string { +func (this *AgentMetadata) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.SetScriptsResponse{") + s := make([]string, 0, 7) + s = append(s, "&metadatapb.AgentMetadata{") + if this.Agent != nil { + s = append(s, "Agent: "+fmt.Sprintf("%#v", this.Agent)+",\n") + } + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + if this.CarnotInfo != nil { + s = append(s, "CarnotInfo: "+fmt.Sprintf("%#v", this.CarnotInfo)+",\n") + } s = append(s, "}") return strings.Join(s, "") } -func (this *ExecutionStats) GoString() string { +func (this 
*AgentUpdatesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) - s = append(s, "&metadatapb.ExecutionStats{") - s = append(s, "ExecutionTimeNs: "+fmt.Sprintf("%#v", this.ExecutionTimeNs)+",\n") - s = append(s, "CompilationTimeNs: "+fmt.Sprintf("%#v", this.CompilationTimeNs)+",\n") - s = append(s, "BytesProcessed: "+fmt.Sprintf("%#v", this.BytesProcessed)+",\n") - s = append(s, "RecordsProcessed: "+fmt.Sprintf("%#v", this.RecordsProcessed)+",\n") + s := make([]string, 0, 6) + s = append(s, "&metadatapb.AgentUpdatesRequest{") + if this.MaxUpdateInterval != nil { + s = append(s, "MaxUpdateInterval: "+fmt.Sprintf("%#v", this.MaxUpdateInterval)+",\n") + } + s = append(s, "MaxUpdatesPerResponse: "+fmt.Sprintf("%#v", this.MaxUpdatesPerResponse)+",\n") s = append(s, "}") return strings.Join(s, "") } -func (this *RecordExecutionResultRequest) GoString() string { +func (this *AgentUpdate) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 8) - s = append(s, "&metadatapb.RecordExecutionResultRequest{") - if this.ScriptID != nil { - s = append(s, "ScriptID: "+fmt.Sprintf("%#v", this.ScriptID)+",\n") - } - if this.Timestamp != nil { - s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "&metadatapb.AgentUpdate{") + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") } - if this.Result != nil { - s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + if this.Update != nil { + s = append(s, "Update: "+fmt.Sprintf("%#v", this.Update)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *RecordExecutionResultRequest_Error) GoString() string { +func (this *AgentUpdate_Deleted) GoString() string { if this == nil { return "nil" } - s := strings.Join([]string{`&metadatapb.RecordExecutionResultRequest_Error{` + - `Error:` + fmt.Sprintf("%#v", this.Error) + `}`}, ", ") + s := 
strings.Join([]string{`&metadatapb.AgentUpdate_Deleted{` + + `Deleted:` + fmt.Sprintf("%#v", this.Deleted) + `}`}, ", ") return s } -func (this *RecordExecutionResultRequest_ExecutionStats) GoString() string { +func (this *AgentUpdate_Agent) GoString() string { if this == nil { return "nil" } - s := strings.Join([]string{`&metadatapb.RecordExecutionResultRequest_ExecutionStats{` + - `ExecutionStats:` + fmt.Sprintf("%#v", this.ExecutionStats) + `}`}, ", ") + s := strings.Join([]string{`&metadatapb.AgentUpdate_Agent{` + + `Agent:` + fmt.Sprintf("%#v", this.Agent) + `}`}, ", ") return s } -func (this *RecordExecutionResultResponse) GoString() string { +func (this *AgentUpdate_DataInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.RecordExecutionResultResponse{") + s := strings.Join([]string{`&metadatapb.AgentUpdate_DataInfo{` + + `DataInfo:` + fmt.Sprintf("%#v", this.DataInfo) + `}`}, ", ") + return s +} +func (this *AgentUpdatesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&metadatapb.AgentUpdatesResponse{") + if this.AgentUpdates != nil { + s = append(s, "AgentUpdates: "+fmt.Sprintf("%#v", this.AgentUpdates)+",\n") + } + if this.AgentSchemas != nil { + s = append(s, "AgentSchemas: "+fmt.Sprintf("%#v", this.AgentSchemas)+",\n") + } + s = append(s, "AgentSchemasUpdated: "+fmt.Sprintf("%#v", this.AgentSchemasUpdated)+",\n") + s = append(s, "EndOfVersion: "+fmt.Sprintf("%#v", this.EndOfVersion)+",\n") s = append(s, "}") return strings.Join(s, "") } -func (this *GetAllExecutionResultsRequest) GoString() string { +func (this *WithPrefixKeyRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 4) - s = append(s, "&metadatapb.GetAllExecutionResultsRequest{") + s := make([]string, 0, 6) + s = append(s, "&metadatapb.WithPrefixKeyRequest{") + s = append(s, "Prefix: "+fmt.Sprintf("%#v", this.Prefix)+",\n") + s = 
append(s, "Proto: "+fmt.Sprintf("%#v", this.Proto)+",\n") s = append(s, "}") return strings.Join(s, "") } -func (this *GetAllExecutionResultsResponse) GoString() string { +func (this *WithPrefixKeyResponse) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) - s = append(s, "&metadatapb.GetAllExecutionResultsResponse{") - if this.Results != nil { - s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") + s = append(s, "&metadatapb.WithPrefixKeyResponse{") + if this.Kvs != nil { + s = append(s, "Kvs: "+fmt.Sprintf("%#v", this.Kvs)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *GetAllExecutionResultsResponse_ExecutionResult) GoString() string { +func (this *WithPrefixKeyResponse_KV) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) - s = append(s, "&metadatapb.GetAllExecutionResultsResponse_ExecutionResult{") - if this.ScriptID != nil { - s = append(s, "ScriptID: "+fmt.Sprintf("%#v", this.ScriptID)+",\n") - } - if this.Timestamp != nil { - s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s := make([]string, 0, 6) + s = append(s, "&metadatapb.WithPrefixKeyResponse_KV{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RegisterFileSourceRequest) GoString() string { + if this == nil { + return "nil" } - if this.Result != nil { - s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + s := make([]string, 0, 5) + s = append(s, "&metadatapb.RegisterFileSourceRequest{") + if this.Requests != nil { + s = append(s, "Requests: "+fmt.Sprintf("%#v", this.Requests)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) GoString() string { +func (this *RegisterFileSourceResponse) GoString() string { if this == nil { return "nil" } - s := 
strings.Join([]string{`&metadatapb.GetAllExecutionResultsResponse_ExecutionResult_Error{` + - `Error:` + fmt.Sprintf("%#v", this.Error) + `}`}, ", ") - return s + s := make([]string, 0, 6) + s = append(s, "&metadatapb.RegisterFileSourceResponse{") + if this.FileSources != nil { + s = append(s, "FileSources: "+fmt.Sprintf("%#v", this.FileSources)+",\n") + } + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } -func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) GoString() string { +func (this *RegisterFileSourceResponse_FileSourceStatus) GoString() string { if this == nil { return "nil" } - s := strings.Join([]string{`&metadatapb.GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats{` + - `ExecutionStats:` + fmt.Sprintf("%#v", this.ExecutionStats) + `}`}, ", ") - return s + s := make([]string, 0, 7) + s = append(s, "&metadatapb.RegisterFileSourceResponse_FileSourceStatus{") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "}") + return strings.Join(s, "") } -func valueToGoStringService(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { +func (this *GetFileSourceInfoRequest) GoString() string { + if this == nil { return "nil" } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// MetadataServiceClient is the client API for MetadataService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetadataServiceClient interface { - GetAgentUpdates(ctx context.Context, in *AgentUpdatesRequest, opts ...grpc.CallOption) (MetadataService_GetAgentUpdatesClient, error) - GetSchemas(ctx context.Context, in *SchemaRequest, opts ...grpc.CallOption) (*SchemaResponse, error) - GetAgentInfo(ctx context.Context, in *AgentInfoRequest, opts ...grpc.CallOption) (*AgentInfoResponse, error) - GetWithPrefixKey(ctx context.Context, in *WithPrefixKeyRequest, opts ...grpc.CallOption) (*WithPrefixKeyResponse, error) -} - -type metadataServiceClient struct { - cc *grpc.ClientConn + s := make([]string, 0, 5) + s = append(s, "&metadatapb.GetFileSourceInfoRequest{") + if this.IDs != nil { + s = append(s, "IDs: "+fmt.Sprintf("%#v", this.IDs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func NewMetadataServiceClient(cc *grpc.ClientConn) MetadataServiceClient { - return &metadataServiceClient{cc} +func (this *GetFileSourceInfoResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metadatapb.GetFileSourceInfoResponse{") + if this.FileSources != nil { + s = append(s, "FileSources: "+fmt.Sprintf("%#v", this.FileSources)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func (c *metadataServiceClient) GetAgentUpdates(ctx context.Context, in *AgentUpdatesRequest, opts ...grpc.CallOption) (MetadataService_GetAgentUpdatesClient, error) { - stream, err := c.cc.NewStream(ctx, &_MetadataService_serviceDesc.Streams[0], "/px.vizier.services.metadata.MetadataService/GetAgentUpdates", opts...) 
- if err != nil { - return nil, err +func (this *GetFileSourceInfoResponse_FileSourceState) GoString() string { + if this == nil { + return "nil" } - x := &metadataServiceGetAgentUpdatesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err + s := make([]string, 0, 10) + s = append(s, "&metadatapb.GetFileSourceInfoResponse_FileSourceState{") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + if this.Statuses != nil { + s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") } - return x, nil -} - -type MetadataService_GetAgentUpdatesClient interface { - Recv() (*AgentUpdatesResponse, error) - grpc.ClientStream -} - -type metadataServiceGetAgentUpdatesClient struct { - grpc.ClientStream + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "ExpectedState: "+fmt.Sprintf("%#v", this.ExpectedState)+",\n") + s = append(s, "SchemaNames: "+fmt.Sprintf("%#v", this.SchemaNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (x *metadataServiceGetAgentUpdatesClient) Recv() (*AgentUpdatesResponse, error) { - m := new(AgentUpdatesResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (this *RemoveFileSourceRequest) GoString() string { + if this == nil { + return "nil" } - return m, nil + s := make([]string, 0, 5) + s = append(s, "&metadatapb.RemoveFileSourceRequest{") + s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (c *metadataServiceClient) GetSchemas(ctx context.Context, in *SchemaRequest, opts ...grpc.CallOption) (*SchemaResponse, error) { - out := new(SchemaResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataService/GetSchemas", in, out, opts...) 
- if err != nil { - return nil, err +func (this *RemoveFileSourceResponse) GoString() string { + if this == nil { + return "nil" } - return out, nil -} - -func (c *metadataServiceClient) GetAgentInfo(ctx context.Context, in *AgentInfoRequest, opts ...grpc.CallOption) (*AgentInfoResponse, error) { - out := new(AgentInfoResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataService/GetAgentInfo", in, out, opts...) - if err != nil { - return nil, err + s := make([]string, 0, 5) + s = append(s, "&metadatapb.RemoveFileSourceResponse{") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") } - return out, nil + s = append(s, "}") + return strings.Join(s, "") } - -func (c *metadataServiceClient) GetWithPrefixKey(ctx context.Context, in *WithPrefixKeyRequest, opts ...grpc.CallOption) (*WithPrefixKeyResponse, error) { - out := new(WithPrefixKeyResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataService/GetWithPrefixKey", in, out, opts...) - if err != nil { - return nil, err +func (this *RegisterTracepointRequest) GoString() string { + if this == nil { + return "nil" } - return out, nil -} - -// MetadataServiceServer is the server API for MetadataService service. -type MetadataServiceServer interface { - GetAgentUpdates(*AgentUpdatesRequest, MetadataService_GetAgentUpdatesServer) error - GetSchemas(context.Context, *SchemaRequest) (*SchemaResponse, error) - GetAgentInfo(context.Context, *AgentInfoRequest) (*AgentInfoResponse, error) - GetWithPrefixKey(context.Context, *WithPrefixKeyRequest) (*WithPrefixKeyResponse, error) -} - -// UnimplementedMetadataServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetadataServiceServer struct { -} - -func (*UnimplementedMetadataServiceServer) GetAgentUpdates(req *AgentUpdatesRequest, srv MetadataService_GetAgentUpdatesServer) error { - return status.Errorf(codes.Unimplemented, "method GetAgentUpdates not implemented") -} -func (*UnimplementedMetadataServiceServer) GetSchemas(ctx context.Context, req *SchemaRequest) (*SchemaResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSchemas not implemented") -} -func (*UnimplementedMetadataServiceServer) GetAgentInfo(ctx context.Context, req *AgentInfoRequest) (*AgentInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAgentInfo not implemented") -} -func (*UnimplementedMetadataServiceServer) GetWithPrefixKey(ctx context.Context, req *WithPrefixKeyRequest) (*WithPrefixKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetWithPrefixKey not implemented") -} - -func RegisterMetadataServiceServer(s *grpc.Server, srv MetadataServiceServer) { - s.RegisterService(&_MetadataService_serviceDesc, srv) -} - -func _MetadataService_GetAgentUpdates_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(AgentUpdatesRequest) - if err := stream.RecvMsg(m); err != nil { - return err + s := make([]string, 0, 5) + s = append(s, "&metadatapb.RegisterTracepointRequest{") + if this.Requests != nil { + s = append(s, "Requests: "+fmt.Sprintf("%#v", this.Requests)+",\n") } - return srv.(MetadataServiceServer).GetAgentUpdates(m, &metadataServiceGetAgentUpdatesServer{stream}) -} - -type MetadataService_GetAgentUpdatesServer interface { - Send(*AgentUpdatesResponse) error - grpc.ServerStream -} - -type metadataServiceGetAgentUpdatesServer struct { - grpc.ServerStream -} - -func (x *metadataServiceGetAgentUpdatesServer) Send(m *AgentUpdatesResponse) error { - return x.ServerStream.SendMsg(m) + s = append(s, "}") + return strings.Join(s, "") } - -func _MetadataService_GetSchemas_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SchemaRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetadataServiceServer).GetSchemas(ctx, in) +func (this *RegisterTracepointRequest_TracepointRequest) GoString() string { + if this == nil { + return "nil" } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataService/GetSchemas", + s := make([]string, 0, 7) + s = append(s, "&metadatapb.RegisterTracepointRequest_TracepointRequest{") + if this.TracepointDeployment != nil { + s = append(s, "TracepointDeployment: "+fmt.Sprintf("%#v", this.TracepointDeployment)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataServiceServer).GetSchemas(ctx, req.(*SchemaRequest)) + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.TTL != nil { + s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "}") + return strings.Join(s, "") } - -func _MetadataService_GetAgentInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AgentInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetadataServiceServer).GetAgentInfo(ctx, in) +func (this *RegisterTracepointResponse) GoString() string { + if this == nil { + return "nil" } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataService/GetAgentInfo", + s := make([]string, 0, 6) + s = append(s, "&metadatapb.RegisterTracepointResponse{") + if this.Tracepoints != nil { + s = append(s, "Tracepoints: "+fmt.Sprintf("%#v", this.Tracepoints)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(MetadataServiceServer).GetAgentInfo(ctx, req.(*AgentInfoRequest)) + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "}") + return strings.Join(s, "") } - -func _MetadataService_GetWithPrefixKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WithPrefixKeyRequest) - if err := dec(in); err != nil { - return nil, err +func (this *RegisterTracepointResponse_TracepointStatus) GoString() string { + if this == nil { + return "nil" } - if interceptor == nil { - return srv.(MetadataServiceServer).GetWithPrefixKey(ctx, in) + s := make([]string, 0, 7) + s = append(s, "&metadatapb.RegisterTracepointResponse_TracepointStatus{") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataService/GetWithPrefixKey", + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataServiceServer).GetWithPrefixKey(ctx, req.(*WithPrefixKeyRequest)) + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetTracepointInfoRequest) GoString() string { + if this == nil { + return "nil" } - return interceptor(ctx, in, info, handler) + s := make([]string, 0, 5) + s = append(s, "&metadatapb.GetTracepointInfoRequest{") + if this.IDs != nil { + s = append(s, "IDs: "+fmt.Sprintf("%#v", this.IDs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -var _MetadataService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "px.vizier.services.metadata.MetadataService", - HandlerType: (*MetadataServiceServer)(nil), - Methods: []grpc.MethodDesc{ - 
{ - MethodName: "GetSchemas", - Handler: _MetadataService_GetSchemas_Handler, - }, - { - MethodName: "GetAgentInfo", - Handler: _MetadataService_GetAgentInfo_Handler, - }, - { - MethodName: "GetWithPrefixKey", - Handler: _MetadataService_GetWithPrefixKey_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "GetAgentUpdates", - Handler: _MetadataService_GetAgentUpdates_Handler, - ServerStreams: true, - }, - }, - Metadata: "src/vizier/services/metadata/metadatapb/service.proto", -} - -// MetadataTracepointServiceClient is the client API for MetadataTracepointService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetadataTracepointServiceClient interface { - RegisterTracepoint(ctx context.Context, in *RegisterTracepointRequest, opts ...grpc.CallOption) (*RegisterTracepointResponse, error) - GetTracepointInfo(ctx context.Context, in *GetTracepointInfoRequest, opts ...grpc.CallOption) (*GetTracepointInfoResponse, error) - RemoveTracepoint(ctx context.Context, in *RemoveTracepointRequest, opts ...grpc.CallOption) (*RemoveTracepointResponse, error) -} - -type metadataTracepointServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetadataTracepointServiceClient(cc *grpc.ClientConn) MetadataTracepointServiceClient { - return &metadataTracepointServiceClient{cc} -} - -func (c *metadataTracepointServiceClient) RegisterTracepoint(ctx context.Context, in *RegisterTracepointRequest, opts ...grpc.CallOption) (*RegisterTracepointResponse, error) { - out := new(RegisterTracepointResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataTracepointService/RegisterTracepoint", in, out, opts...) 
- if err != nil { - return nil, err +func (this *GetTracepointInfoResponse) GoString() string { + if this == nil { + return "nil" } - return out, nil -} - -func (c *metadataTracepointServiceClient) GetTracepointInfo(ctx context.Context, in *GetTracepointInfoRequest, opts ...grpc.CallOption) (*GetTracepointInfoResponse, error) { - out := new(GetTracepointInfoResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataTracepointService/GetTracepointInfo", in, out, opts...) - if err != nil { - return nil, err + s := make([]string, 0, 5) + s = append(s, "&metadatapb.GetTracepointInfoResponse{") + if this.Tracepoints != nil { + s = append(s, "Tracepoints: "+fmt.Sprintf("%#v", this.Tracepoints)+",\n") } - return out, nil + s = append(s, "}") + return strings.Join(s, "") } - -func (c *metadataTracepointServiceClient) RemoveTracepoint(ctx context.Context, in *RemoveTracepointRequest, opts ...grpc.CallOption) (*RemoveTracepointResponse, error) { - out := new(RemoveTracepointResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataTracepointService/RemoveTracepoint", in, out, opts...) - if err != nil { - return nil, err +func (this *GetTracepointInfoResponse_TracepointState) GoString() string { + if this == nil { + return "nil" } - return out, nil -} - -// MetadataTracepointServiceServer is the server API for MetadataTracepointService service. 
-type MetadataTracepointServiceServer interface { - RegisterTracepoint(context.Context, *RegisterTracepointRequest) (*RegisterTracepointResponse, error) - GetTracepointInfo(context.Context, *GetTracepointInfoRequest) (*GetTracepointInfoResponse, error) - RemoveTracepoint(context.Context, *RemoveTracepointRequest) (*RemoveTracepointResponse, error) + s := make([]string, 0, 10) + s = append(s, "&metadatapb.GetTracepointInfoResponse_TracepointState{") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + if this.Statuses != nil { + s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") + } + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "ExpectedState: "+fmt.Sprintf("%#v", this.ExpectedState)+",\n") + s = append(s, "SchemaNames: "+fmt.Sprintf("%#v", this.SchemaNames)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -// UnimplementedMetadataTracepointServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetadataTracepointServiceServer struct { +func (this *RemoveTracepointRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metadatapb.RemoveTracepointRequest{") + s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func (*UnimplementedMetadataTracepointServiceServer) RegisterTracepoint(ctx context.Context, req *RegisterTracepointRequest) (*RegisterTracepointResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RegisterTracepoint not implemented") +func (this *RemoveTracepointResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metadatapb.RemoveTracepointResponse{") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } -func (*UnimplementedMetadataTracepointServiceServer) GetTracepointInfo(ctx context.Context, req *GetTracepointInfoRequest) (*GetTracepointInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTracepointInfo not implemented") +func (this *UpdateConfigRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&metadatapb.UpdateConfigRequest{") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "AgentPodName: "+fmt.Sprintf("%#v", this.AgentPodName)+",\n") + s = append(s, "}") + return strings.Join(s, "") } -func (*UnimplementedMetadataTracepointServiceServer) RemoveTracepoint(ctx context.Context, req *RemoveTracepointRequest) (*RemoveTracepointResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveTracepoint not implemented") +func (this *UpdateConfigResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) 
+ s = append(s, "&metadatapb.UpdateConfigResponse{") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func RegisterMetadataTracepointServiceServer(s *grpc.Server, srv MetadataTracepointServiceServer) { - s.RegisterService(&_MetadataTracepointService_serviceDesc, srv) +func (this *GetScriptsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&metadatapb.GetScriptsRequest{") + s = append(s, "}") + return strings.Join(s, "") } - -func _MetadataTracepointService_RegisterTracepoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegisterTracepointRequest) - if err := dec(in); err != nil { - return nil, err +func (this *GetScriptsResponse) GoString() string { + if this == nil { + return "nil" } - if interceptor == nil { - return srv.(MetadataTracepointServiceServer).RegisterTracepoint(ctx, in) + s := make([]string, 0, 5) + s = append(s, "&metadatapb.GetScriptsResponse{") + keysForScripts := make([]string, 0, len(this.Scripts)) + for k, _ := range this.Scripts { + keysForScripts = append(keysForScripts, k) } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataTracepointService/RegisterTracepoint", + github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) + mapStringForScripts := "map[string]*cvmsgspb.CronScript{" + for _, k := range keysForScripts { + mapStringForScripts += fmt.Sprintf("%#v: %#v,", k, this.Scripts[k]) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataTracepointServiceServer).RegisterTracepoint(ctx, req.(*RegisterTracepointRequest)) + mapStringForScripts += "}" + if this.Scripts != nil { + s = append(s, "Scripts: "+mapStringForScripts+",\n") } - return interceptor(ctx, in, info, handler) + s = 
append(s, "}") + return strings.Join(s, "") } - -func _MetadataTracepointService_GetTracepointInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTracepointInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetadataTracepointServiceServer).GetTracepointInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataTracepointService/GetTracepointInfo", +func (this *AddOrUpdateScriptRequest) GoString() string { + if this == nil { + return "nil" } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataTracepointServiceServer).GetTracepointInfo(ctx, req.(*GetTracepointInfoRequest)) + s := make([]string, 0, 5) + s = append(s, "&metadatapb.AddOrUpdateScriptRequest{") + if this.Script != nil { + s = append(s, "Script: "+fmt.Sprintf("%#v", this.Script)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "}") + return strings.Join(s, "") } - -func _MetadataTracepointService_RemoveTracepoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveTracepointRequest) - if err := dec(in); err != nil { - return nil, err +func (this *AddOrUpdateScriptResponse) GoString() string { + if this == nil { + return "nil" } - if interceptor == nil { - return srv.(MetadataTracepointServiceServer).RemoveTracepoint(ctx, in) + s := make([]string, 0, 4) + s = append(s, "&metadatapb.AddOrUpdateScriptResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DeleteScriptRequest) GoString() string { + if this == nil { + return "nil" } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataTracepointService/RemoveTracepoint", + s := make([]string, 0, 5) + s 
= append(s, "&metadatapb.DeleteScriptRequest{") + if this.ScriptID != nil { + s = append(s, "ScriptID: "+fmt.Sprintf("%#v", this.ScriptID)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataTracepointServiceServer).RemoveTracepoint(ctx, req.(*RemoveTracepointRequest)) + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DeleteScriptResponse) GoString() string { + if this == nil { + return "nil" } - return interceptor(ctx, in, info, handler) + s := make([]string, 0, 4) + s = append(s, "&metadatapb.DeleteScriptResponse{") + s = append(s, "}") + return strings.Join(s, "") } - -var _MetadataTracepointService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "px.vizier.services.metadata.MetadataTracepointService", - HandlerType: (*MetadataTracepointServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "RegisterTracepoint", - Handler: _MetadataTracepointService_RegisterTracepoint_Handler, - }, - { - MethodName: "GetTracepointInfo", - Handler: _MetadataTracepointService_GetTracepointInfo_Handler, - }, - { - MethodName: "RemoveTracepoint", - Handler: _MetadataTracepointService_RemoveTracepoint_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "src/vizier/services/metadata/metadatapb/service.proto", +func (this *SetScriptsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metadatapb.SetScriptsRequest{") + keysForScripts := make([]string, 0, len(this.Scripts)) + for k, _ := range this.Scripts { + keysForScripts = append(keysForScripts, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) + mapStringForScripts := "map[string]*cvmsgspb.CronScript{" + for _, k := range keysForScripts { + mapStringForScripts += fmt.Sprintf("%#v: %#v,", k, this.Scripts[k]) + } + mapStringForScripts += "}" + if this.Scripts != nil { + s = append(s, "Scripts: "+mapStringForScripts+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") } - -// MetadataConfigServiceClient is the client API for MetadataConfigService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetadataConfigServiceClient interface { - UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) +func (this *SetScriptsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&metadatapb.SetScriptsResponse{") + s = append(s, "}") + return strings.Join(s, "") } - -type metadataConfigServiceClient struct { - cc *grpc.ClientConn +func (this *ExecutionStats) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&metadatapb.ExecutionStats{") + s = append(s, "ExecutionTimeNs: "+fmt.Sprintf("%#v", this.ExecutionTimeNs)+",\n") + s = append(s, "CompilationTimeNs: "+fmt.Sprintf("%#v", this.CompilationTimeNs)+",\n") + s = append(s, "BytesProcessed: "+fmt.Sprintf("%#v", this.BytesProcessed)+",\n") + s = append(s, "RecordsProcessed: "+fmt.Sprintf("%#v", this.RecordsProcessed)+",\n") + s = append(s, "}") + return strings.Join(s, "") } - -func NewMetadataConfigServiceClient(cc *grpc.ClientConn) MetadataConfigServiceClient { - return &metadataConfigServiceClient{cc} +func (this *RecordExecutionResultRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&metadatapb.RecordExecutionResultRequest{") + if this.ScriptID != nil { + s = append(s, "ScriptID: "+fmt.Sprintf("%#v", this.ScriptID)+",\n") + } + if this.Timestamp != nil { + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + } + if this.Result != nil { + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func (c *metadataConfigServiceClient) 
UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { - out := new(UpdateConfigResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataConfigService/UpdateConfig", in, out, opts...) - if err != nil { - return nil, err +func (this *RecordExecutionResultRequest_Error) GoString() string { + if this == nil { + return "nil" } - return out, nil + s := strings.Join([]string{`&metadatapb.RecordExecutionResultRequest_Error{` + + `Error:` + fmt.Sprintf("%#v", this.Error) + `}`}, ", ") + return s } - -// MetadataConfigServiceServer is the server API for MetadataConfigService service. -type MetadataConfigServiceServer interface { - UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) +func (this *RecordExecutionResultRequest_ExecutionStats) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&metadatapb.RecordExecutionResultRequest_ExecutionStats{` + + `ExecutionStats:` + fmt.Sprintf("%#v", this.ExecutionStats) + `}`}, ", ") + return s } - -// UnimplementedMetadataConfigServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetadataConfigServiceServer struct { +func (this *RecordExecutionResultResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&metadatapb.RecordExecutionResultResponse{") + s = append(s, "}") + return strings.Join(s, "") } - -func (*UnimplementedMetadataConfigServiceServer) UpdateConfig(ctx context.Context, req *UpdateConfigRequest) (*UpdateConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateConfig not implemented") +func (this *GetAllExecutionResultsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&metadatapb.GetAllExecutionResultsRequest{") + s = append(s, "}") + return strings.Join(s, "") } - -func RegisterMetadataConfigServiceServer(s *grpc.Server, srv MetadataConfigServiceServer) { - s.RegisterService(&_MetadataConfigService_serviceDesc, srv) +func (this *GetAllExecutionResultsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&metadatapb.GetAllExecutionResultsResponse{") + if this.Results != nil { + s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") } - -func _MetadataConfigService_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateConfigRequest) - if err := dec(in); err != nil { - return nil, err +func (this *GetAllExecutionResultsResponse_ExecutionResult) GoString() string { + if this == nil { + return "nil" } - if interceptor == nil { - return srv.(MetadataConfigServiceServer).UpdateConfig(ctx, in) + s := make([]string, 0, 8) + s = append(s, "&metadatapb.GetAllExecutionResultsResponse_ExecutionResult{") + if this.ScriptID != nil { + s = append(s, "ScriptID: "+fmt.Sprintf("%#v", this.ScriptID)+",\n") } - info := &grpc.UnaryServerInfo{ - 
Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataConfigService/UpdateConfig", + if this.Timestamp != nil { + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataConfigServiceServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + if this.Result != nil { + s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") } - return interceptor(ctx, in, info, handler) + s = append(s, "}") + return strings.Join(s, "") } - -var _MetadataConfigService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "px.vizier.services.metadata.MetadataConfigService", - HandlerType: (*MetadataConfigServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UpdateConfig", - Handler: _MetadataConfigService_UpdateConfig_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "src/vizier/services/metadata/metadatapb/service.proto", +func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&metadatapb.GetAllExecutionResultsResponse_ExecutionResult_Error{` + + `Error:` + fmt.Sprintf("%#v", this.Error) + `}`}, ", ") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&metadatapb.GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats{` + + `ExecutionStats:` + fmt.Sprintf("%#v", this.ExecutionStats) + `}`}, ", ") + return s +} +func valueToGoStringService(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -// CronScriptStoreServiceClient is the client API for CronScriptStoreService service. 
+// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetadataServiceClient is the client API for MetadataService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type CronScriptStoreServiceClient interface { - GetScripts(ctx context.Context, in *GetScriptsRequest, opts ...grpc.CallOption) (*GetScriptsResponse, error) - AddOrUpdateScript(ctx context.Context, in *AddOrUpdateScriptRequest, opts ...grpc.CallOption) (*AddOrUpdateScriptResponse, error) - DeleteScript(ctx context.Context, in *DeleteScriptRequest, opts ...grpc.CallOption) (*DeleteScriptResponse, error) - SetScripts(ctx context.Context, in *SetScriptsRequest, opts ...grpc.CallOption) (*SetScriptsResponse, error) - RecordExecutionResult(ctx context.Context, in *RecordExecutionResultRequest, opts ...grpc.CallOption) (*RecordExecutionResultResponse, error) - GetAllExecutionResults(ctx context.Context, in *GetAllExecutionResultsRequest, opts ...grpc.CallOption) (*GetAllExecutionResultsResponse, error) +type MetadataServiceClient interface { + GetAgentUpdates(ctx context.Context, in *AgentUpdatesRequest, opts ...grpc.CallOption) (MetadataService_GetAgentUpdatesClient, error) + GetSchemas(ctx context.Context, in *SchemaRequest, opts ...grpc.CallOption) (*SchemaResponse, error) + GetAgentInfo(ctx context.Context, in *AgentInfoRequest, opts ...grpc.CallOption) (*AgentInfoResponse, error) + GetWithPrefixKey(ctx context.Context, in *WithPrefixKeyRequest, opts ...grpc.CallOption) (*WithPrefixKeyResponse, error) } -type cronScriptStoreServiceClient struct { +type metadataServiceClient struct { cc *grpc.ClientConn } -func NewCronScriptStoreServiceClient(cc 
*grpc.ClientConn) CronScriptStoreServiceClient { - return &cronScriptStoreServiceClient{cc} +func NewMetadataServiceClient(cc *grpc.ClientConn) MetadataServiceClient { + return &metadataServiceClient{cc} } -func (c *cronScriptStoreServiceClient) GetScripts(ctx context.Context, in *GetScriptsRequest, opts ...grpc.CallOption) (*GetScriptsResponse, error) { - out := new(GetScriptsResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/GetScripts", in, out, opts...) +func (c *metadataServiceClient) GetAgentUpdates(ctx context.Context, in *AgentUpdatesRequest, opts ...grpc.CallOption) (MetadataService_GetAgentUpdatesClient, error) { + stream, err := c.cc.NewStream(ctx, &_MetadataService_serviceDesc.Streams[0], "/px.vizier.services.metadata.MetadataService/GetAgentUpdates", opts...) if err != nil { return nil, err } - return out, nil -} - -func (c *cronScriptStoreServiceClient) AddOrUpdateScript(ctx context.Context, in *AddOrUpdateScriptRequest, opts ...grpc.CallOption) (*AddOrUpdateScriptResponse, error) { - out := new(AddOrUpdateScriptResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/AddOrUpdateScript", in, out, opts...) - if err != nil { + x := &metadataServiceGetAgentUpdatesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } - return out, nil -} - -func (c *cronScriptStoreServiceClient) DeleteScript(ctx context.Context, in *DeleteScriptRequest, opts ...grpc.CallOption) (*DeleteScriptResponse, error) { - out := new(DeleteScriptResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/DeleteScript", in, out, opts...) 
- if err != nil { + if err := x.ClientStream.CloseSend(); err != nil { return nil, err } - return out, nil + return x, nil } -func (c *cronScriptStoreServiceClient) SetScripts(ctx context.Context, in *SetScriptsRequest, opts ...grpc.CallOption) (*SetScriptsResponse, error) { - out := new(SetScriptsResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/SetScripts", in, out, opts...) +type MetadataService_GetAgentUpdatesClient interface { + Recv() (*AgentUpdatesResponse, error) + grpc.ClientStream +} + +type metadataServiceGetAgentUpdatesClient struct { + grpc.ClientStream +} + +func (x *metadataServiceGetAgentUpdatesClient) Recv() (*AgentUpdatesResponse, error) { + m := new(AgentUpdatesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *metadataServiceClient) GetSchemas(ctx context.Context, in *SchemaRequest, opts ...grpc.CallOption) (*SchemaResponse, error) { + out := new(SchemaResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataService/GetSchemas", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *cronScriptStoreServiceClient) RecordExecutionResult(ctx context.Context, in *RecordExecutionResultRequest, opts ...grpc.CallOption) (*RecordExecutionResultResponse, error) { - out := new(RecordExecutionResultResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/RecordExecutionResult", in, out, opts...) +func (c *metadataServiceClient) GetAgentInfo(ctx context.Context, in *AgentInfoRequest, opts ...grpc.CallOption) (*AgentInfoResponse, error) { + out := new(AgentInfoResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataService/GetAgentInfo", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *cronScriptStoreServiceClient) GetAllExecutionResults(ctx context.Context, in *GetAllExecutionResultsRequest, opts ...grpc.CallOption) (*GetAllExecutionResultsResponse, error) { - out := new(GetAllExecutionResultsResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/GetAllExecutionResults", in, out, opts...) +func (c *metadataServiceClient) GetWithPrefixKey(ctx context.Context, in *WithPrefixKeyRequest, opts ...grpc.CallOption) (*WithPrefixKeyResponse, error) { + out := new(WithPrefixKeyResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataService/GetWithPrefixKey", in, out, opts...) if err != nil { return nil, err } return out, nil } -// CronScriptStoreServiceServer is the server API for CronScriptStoreService service. -type CronScriptStoreServiceServer interface { - GetScripts(context.Context, *GetScriptsRequest) (*GetScriptsResponse, error) - AddOrUpdateScript(context.Context, *AddOrUpdateScriptRequest) (*AddOrUpdateScriptResponse, error) - DeleteScript(context.Context, *DeleteScriptRequest) (*DeleteScriptResponse, error) - SetScripts(context.Context, *SetScriptsRequest) (*SetScriptsResponse, error) - RecordExecutionResult(context.Context, *RecordExecutionResultRequest) (*RecordExecutionResultResponse, error) - GetAllExecutionResults(context.Context, *GetAllExecutionResultsRequest) (*GetAllExecutionResultsResponse, error) +// MetadataServiceServer is the server API for MetadataService service. 
+type MetadataServiceServer interface { + GetAgentUpdates(*AgentUpdatesRequest, MetadataService_GetAgentUpdatesServer) error + GetSchemas(context.Context, *SchemaRequest) (*SchemaResponse, error) + GetAgentInfo(context.Context, *AgentInfoRequest) (*AgentInfoResponse, error) + GetWithPrefixKey(context.Context, *WithPrefixKeyRequest) (*WithPrefixKeyResponse, error) } -// UnimplementedCronScriptStoreServiceServer can be embedded to have forward compatible implementations. -type UnimplementedCronScriptStoreServiceServer struct { +// UnimplementedMetadataServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetadataServiceServer struct { } -func (*UnimplementedCronScriptStoreServiceServer) GetScripts(ctx context.Context, req *GetScriptsRequest) (*GetScriptsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetScripts not implemented") +func (*UnimplementedMetadataServiceServer) GetAgentUpdates(req *AgentUpdatesRequest, srv MetadataService_GetAgentUpdatesServer) error { + return status.Errorf(codes.Unimplemented, "method GetAgentUpdates not implemented") } -func (*UnimplementedCronScriptStoreServiceServer) AddOrUpdateScript(ctx context.Context, req *AddOrUpdateScriptRequest) (*AddOrUpdateScriptResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddOrUpdateScript not implemented") +func (*UnimplementedMetadataServiceServer) GetSchemas(ctx context.Context, req *SchemaRequest) (*SchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchemas not implemented") } -func (*UnimplementedCronScriptStoreServiceServer) DeleteScript(ctx context.Context, req *DeleteScriptRequest) (*DeleteScriptResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteScript not implemented") +func (*UnimplementedMetadataServiceServer) GetAgentInfo(ctx context.Context, req *AgentInfoRequest) (*AgentInfoResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method GetAgentInfo not implemented") } -func (*UnimplementedCronScriptStoreServiceServer) SetScripts(ctx context.Context, req *SetScriptsRequest) (*SetScriptsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetScripts not implemented") +func (*UnimplementedMetadataServiceServer) GetWithPrefixKey(ctx context.Context, req *WithPrefixKeyRequest) (*WithPrefixKeyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetWithPrefixKey not implemented") } -func (*UnimplementedCronScriptStoreServiceServer) RecordExecutionResult(ctx context.Context, req *RecordExecutionResultRequest) (*RecordExecutionResultResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RecordExecutionResult not implemented") + +func RegisterMetadataServiceServer(s *grpc.Server, srv MetadataServiceServer) { + s.RegisterService(&_MetadataService_serviceDesc, srv) } -func (*UnimplementedCronScriptStoreServiceServer) GetAllExecutionResults(ctx context.Context, req *GetAllExecutionResultsRequest) (*GetAllExecutionResultsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllExecutionResults not implemented") + +func _MetadataService_GetAgentUpdates_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(AgentUpdatesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MetadataServiceServer).GetAgentUpdates(m, &metadataServiceGetAgentUpdatesServer{stream}) } -func RegisterCronScriptStoreServiceServer(s *grpc.Server, srv CronScriptStoreServiceServer) { - s.RegisterService(&_CronScriptStoreService_serviceDesc, srv) +type MetadataService_GetAgentUpdatesServer interface { + Send(*AgentUpdatesResponse) error + grpc.ServerStream } -func _CronScriptStoreService_GetScripts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetScriptsRequest) 
+type metadataServiceGetAgentUpdatesServer struct { + grpc.ServerStream +} + +func (x *metadataServiceGetAgentUpdatesServer) Send(m *AgentUpdatesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _MetadataService_GetSchemas_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SchemaRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CronScriptStoreServiceServer).GetScripts(ctx, in) + return srv.(MetadataServiceServer).GetSchemas(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/GetScripts", + FullMethod: "/px.vizier.services.metadata.MetadataService/GetSchemas", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CronScriptStoreServiceServer).GetScripts(ctx, req.(*GetScriptsRequest)) + return srv.(MetadataServiceServer).GetSchemas(ctx, req.(*SchemaRequest)) } return interceptor(ctx, in, info, handler) } -func _CronScriptStoreService_AddOrUpdateScript_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddOrUpdateScriptRequest) +func _MetadataService_GetAgentInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AgentInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CronScriptStoreServiceServer).AddOrUpdateScript(ctx, in) + return srv.(MetadataServiceServer).GetAgentInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/AddOrUpdateScript", + FullMethod: "/px.vizier.services.metadata.MetadataService/GetAgentInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(CronScriptStoreServiceServer).AddOrUpdateScript(ctx, req.(*AddOrUpdateScriptRequest)) + return srv.(MetadataServiceServer).GetAgentInfo(ctx, req.(*AgentInfoRequest)) } return interceptor(ctx, in, info, handler) } -func _CronScriptStoreService_DeleteScript_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteScriptRequest) +func _MetadataService_GetWithPrefixKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WithPrefixKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CronScriptStoreServiceServer).DeleteScript(ctx, in) + return srv.(MetadataServiceServer).GetWithPrefixKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/DeleteScript", + FullMethod: "/px.vizier.services.metadata.MetadataService/GetWithPrefixKey", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CronScriptStoreServiceServer).DeleteScript(ctx, req.(*DeleteScriptRequest)) + return srv.(MetadataServiceServer).GetWithPrefixKey(ctx, req.(*WithPrefixKeyRequest)) } return interceptor(ctx, in, info, handler) } -func _CronScriptStoreService_SetScripts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetScriptsRequest) +var _MetadataService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "px.vizier.services.metadata.MetadataService", + HandlerType: (*MetadataServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchemas", + Handler: _MetadataService_GetSchemas_Handler, + }, + { + MethodName: "GetAgentInfo", + Handler: _MetadataService_GetAgentInfo_Handler, + }, + { + MethodName: "GetWithPrefixKey", + 
Handler: _MetadataService_GetWithPrefixKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetAgentUpdates", + Handler: _MetadataService_GetAgentUpdates_Handler, + ServerStreams: true, + }, + }, + Metadata: "src/vizier/services/metadata/metadatapb/service.proto", +} + +// MetadataFileSourceServiceClient is the client API for MetadataFileSourceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetadataFileSourceServiceClient interface { + RegisterFileSource(ctx context.Context, in *RegisterFileSourceRequest, opts ...grpc.CallOption) (*RegisterFileSourceResponse, error) + GetFileSourceInfo(ctx context.Context, in *GetFileSourceInfoRequest, opts ...grpc.CallOption) (*GetFileSourceInfoResponse, error) + RemoveFileSource(ctx context.Context, in *RemoveFileSourceRequest, opts ...grpc.CallOption) (*RemoveFileSourceResponse, error) +} + +type metadataFileSourceServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetadataFileSourceServiceClient(cc *grpc.ClientConn) MetadataFileSourceServiceClient { + return &metadataFileSourceServiceClient{cc} +} + +func (c *metadataFileSourceServiceClient) RegisterFileSource(ctx context.Context, in *RegisterFileSourceRequest, opts ...grpc.CallOption) (*RegisterFileSourceResponse, error) { + out := new(RegisterFileSourceResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataFileSourceService/RegisterFileSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataFileSourceServiceClient) GetFileSourceInfo(ctx context.Context, in *GetFileSourceInfoRequest, opts ...grpc.CallOption) (*GetFileSourceInfoResponse, error) { + out := new(GetFileSourceInfoResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataFileSourceService/GetFileSourceInfo", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metadataFileSourceServiceClient) RemoveFileSource(ctx context.Context, in *RemoveFileSourceRequest, opts ...grpc.CallOption) (*RemoveFileSourceResponse, error) { + out := new(RemoveFileSourceResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataFileSourceService/RemoveFileSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetadataFileSourceServiceServer is the server API for MetadataFileSourceService service. +type MetadataFileSourceServiceServer interface { + RegisterFileSource(context.Context, *RegisterFileSourceRequest) (*RegisterFileSourceResponse, error) + GetFileSourceInfo(context.Context, *GetFileSourceInfoRequest) (*GetFileSourceInfoResponse, error) + RemoveFileSource(context.Context, *RemoveFileSourceRequest) (*RemoveFileSourceResponse, error) +} + +// UnimplementedMetadataFileSourceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedMetadataFileSourceServiceServer struct { +} + +func (*UnimplementedMetadataFileSourceServiceServer) RegisterFileSource(ctx context.Context, req *RegisterFileSourceRequest) (*RegisterFileSourceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RegisterFileSource not implemented") +} +func (*UnimplementedMetadataFileSourceServiceServer) GetFileSourceInfo(ctx context.Context, req *GetFileSourceInfoRequest) (*GetFileSourceInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFileSourceInfo not implemented") +} +func (*UnimplementedMetadataFileSourceServiceServer) RemoveFileSource(ctx context.Context, req *RemoveFileSourceRequest) (*RemoveFileSourceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveFileSource not implemented") +} + +func RegisterMetadataFileSourceServiceServer(s *grpc.Server, srv MetadataFileSourceServiceServer) { + s.RegisterService(&_MetadataFileSourceService_serviceDesc, srv) +} + +func _MetadataFileSourceService_RegisterFileSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegisterFileSourceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CronScriptStoreServiceServer).SetScripts(ctx, in) + return srv.(MetadataFileSourceServiceServer).RegisterFileSource(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/SetScripts", + FullMethod: "/px.vizier.services.metadata.MetadataFileSourceService/RegisterFileSource", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CronScriptStoreServiceServer).SetScripts(ctx, req.(*SetScriptsRequest)) + return srv.(MetadataFileSourceServiceServer).RegisterFileSource(ctx, req.(*RegisterFileSourceRequest)) } return interceptor(ctx, in, info, handler) } -func 
_CronScriptStoreService_RecordExecutionResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RecordExecutionResultRequest) +func _MetadataFileSourceService_GetFileSourceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFileSourceInfoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CronScriptStoreServiceServer).RecordExecutionResult(ctx, in) + return srv.(MetadataFileSourceServiceServer).GetFileSourceInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/RecordExecutionResult", + FullMethod: "/px.vizier.services.metadata.MetadataFileSourceService/GetFileSourceInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CronScriptStoreServiceServer).RecordExecutionResult(ctx, req.(*RecordExecutionResultRequest)) + return srv.(MetadataFileSourceServiceServer).GetFileSourceInfo(ctx, req.(*GetFileSourceInfoRequest)) } return interceptor(ctx, in, info, handler) } -func _CronScriptStoreService_GetAllExecutionResults_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllExecutionResultsRequest) +func _MetadataFileSourceService_RemoveFileSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveFileSourceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CronScriptStoreServiceServer).GetAllExecutionResults(ctx, in) + return srv.(MetadataFileSourceServiceServer).RemoveFileSource(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
"/px.vizier.services.metadata.CronScriptStoreService/GetAllExecutionResults", + FullMethod: "/px.vizier.services.metadata.MetadataFileSourceService/RemoveFileSource", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CronScriptStoreServiceServer).GetAllExecutionResults(ctx, req.(*GetAllExecutionResultsRequest)) + return srv.(MetadataFileSourceServiceServer).RemoveFileSource(ctx, req.(*RemoveFileSourceRequest)) } return interceptor(ctx, in, info, handler) } -var _CronScriptStoreService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "px.vizier.services.metadata.CronScriptStoreService", - HandlerType: (*CronScriptStoreServiceServer)(nil), +var _MetadataFileSourceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "px.vizier.services.metadata.MetadataFileSourceService", + HandlerType: (*MetadataFileSourceServiceServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "GetScripts", - Handler: _CronScriptStoreService_GetScripts_Handler, - }, - { - MethodName: "AddOrUpdateScript", - Handler: _CronScriptStoreService_AddOrUpdateScript_Handler, - }, - { - MethodName: "DeleteScript", - Handler: _CronScriptStoreService_DeleteScript_Handler, - }, - { - MethodName: "SetScripts", - Handler: _CronScriptStoreService_SetScripts_Handler, + MethodName: "RegisterFileSource", + Handler: _MetadataFileSourceService_RegisterFileSource_Handler, }, { - MethodName: "RecordExecutionResult", - Handler: _CronScriptStoreService_RecordExecutionResult_Handler, + MethodName: "GetFileSourceInfo", + Handler: _MetadataFileSourceService_GetFileSourceInfo_Handler, }, { - MethodName: "GetAllExecutionResults", - Handler: _CronScriptStoreService_GetAllExecutionResults_Handler, + MethodName: "RemoveFileSource", + Handler: _MetadataFileSourceService_RemoveFileSource_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "src/vizier/services/metadata/metadatapb/service.proto", } -func (m *SchemaRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// MetadataTracepointServiceClient is the client API for MetadataTracepointService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetadataTracepointServiceClient interface { + RegisterTracepoint(ctx context.Context, in *RegisterTracepointRequest, opts ...grpc.CallOption) (*RegisterTracepointResponse, error) + GetTracepointInfo(ctx context.Context, in *GetTracepointInfoRequest, opts ...grpc.CallOption) (*GetTracepointInfoResponse, error) + RemoveTracepoint(ctx context.Context, in *RemoveTracepointRequest, opts ...grpc.CallOption) (*RemoveTracepointResponse, error) } -func (m *SchemaRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type metadataTracepointServiceClient struct { + cc *grpc.ClientConn } -func (m *SchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func NewMetadataTracepointServiceClient(cc *grpc.ClientConn) MetadataTracepointServiceClient { + return &metadataTracepointServiceClient{cc} } -func (m *SchemaResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *metadataTracepointServiceClient) RegisterTracepoint(ctx context.Context, in *RegisterTracepointRequest, opts ...grpc.CallOption) (*RegisterTracepointResponse, error) { + out := new(RegisterTracepointResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataTracepointService/RegisterTracepoint", in, out, opts...) 
if err != nil { return nil, err } - return dAtA[:n], nil -} - -func (m *SchemaResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return out, nil } -func (m *SchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 +func (c *metadataTracepointServiceClient) GetTracepointInfo(ctx context.Context, in *GetTracepointInfoRequest, opts ...grpc.CallOption) (*GetTracepointInfoResponse, error) { + out := new(GetTracepointInfoResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataTracepointService/GetTracepointInfo", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *AgentInfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *metadataTracepointServiceClient) RemoveTracepoint(ctx context.Context, in *RemoveTracepointRequest, opts ...grpc.CallOption) (*RemoveTracepointResponse, error) { + out := new(RemoveTracepointResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataTracepointService/RemoveTracepoint", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *AgentInfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// MetadataTracepointServiceServer is the server API for MetadataTracepointService service. 
+type MetadataTracepointServiceServer interface { + RegisterTracepoint(context.Context, *RegisterTracepointRequest) (*RegisterTracepointResponse, error) + GetTracepointInfo(context.Context, *GetTracepointInfoRequest) (*GetTracepointInfoResponse, error) + RemoveTracepoint(context.Context, *RemoveTracepointRequest) (*RemoveTracepointResponse, error) } -func (m *AgentInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +// UnimplementedMetadataTracepointServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetadataTracepointServiceServer struct { } -func (m *AgentInfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (*UnimplementedMetadataTracepointServiceServer) RegisterTracepoint(ctx context.Context, req *RegisterTracepointRequest) (*RegisterTracepointResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RegisterTracepoint not implemented") +} +func (*UnimplementedMetadataTracepointServiceServer) GetTracepointInfo(ctx context.Context, req *GetTracepointInfoRequest) (*GetTracepointInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTracepointInfo not implemented") +} +func (*UnimplementedMetadataTracepointServiceServer) RemoveTracepoint(ctx context.Context, req *RemoveTracepointRequest) (*RemoveTracepointResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveTracepoint not implemented") } -func (m *AgentInfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func RegisterMetadataTracepointServiceServer(s *grpc.Server, srv MetadataTracepointServiceServer) { + s.RegisterService(&_MetadataTracepointService_serviceDesc, srv) } -func (m *AgentInfoResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func _MetadataTracepointService_RegisterTracepoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegisterTracepointRequest) + if err := dec(in); err != nil { + return nil, err } - return len(dAtA) - i, nil + if interceptor == nil { + return srv.(MetadataTracepointServiceServer).RegisterTracepoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.MetadataTracepointService/RegisterTracepoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataTracepointServiceServer).RegisterTracepoint(ctx, req.(*RegisterTracepointRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *AgentMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _MetadataTracepointService_GetTracepointInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTracepointInfoRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(MetadataTracepointServiceServer).GetTracepointInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.MetadataTracepointService/GetTracepointInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(MetadataTracepointServiceServer).GetTracepointInfo(ctx, req.(*GetTracepointInfoRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *AgentMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _MetadataTracepointService_RemoveTracepoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveTracepointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetadataTracepointServiceServer).RemoveTracepoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.MetadataTracepointService/RemoveTracepoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataTracepointServiceServer).RemoveTracepoint(ctx, req.(*RemoveTracepointRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *AgentMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CarnotInfo != nil { +var _MetadataTracepointService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "px.vizier.services.metadata.MetadataTracepointService", + HandlerType: (*MetadataTracepointServiceServer)(nil), + Methods: []grpc.MethodDesc{ { - size, err := m.CarnotInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Status != nil { + MethodName: "RegisterTracepoint", + Handler: _MetadataTracepointService_RegisterTracepoint_Handler, + }, { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Agent != nil { + MethodName: "GetTracepointInfo", + Handler: 
_MetadataTracepointService_GetTracepointInfo_Handler, + }, { - size, err := m.Agent.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + MethodName: "RemoveTracepoint", + Handler: _MetadataTracepointService_RemoveTracepoint_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "src/vizier/services/metadata/metadatapb/service.proto", } -func (m *AgentUpdatesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +// MetadataConfigServiceClient is the client API for MetadataConfigService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetadataConfigServiceClient interface { + UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) +} + +type metadataConfigServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetadataConfigServiceClient(cc *grpc.ClientConn) MetadataConfigServiceClient { + return &metadataConfigServiceClient{cc} +} + +func (c *metadataConfigServiceClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + out := new(UpdateConfigResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataConfigService/UpdateConfig", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *AgentUpdatesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// MetadataConfigServiceServer is the server API for MetadataConfigService service. 
+type MetadataConfigServiceServer interface { + UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) } -func (m *AgentUpdatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxUpdatesPerResponse != 0 { - i = encodeVarintService(dAtA, i, uint64(m.MaxUpdatesPerResponse)) - i-- - dAtA[i] = 0x10 - } - if m.MaxUpdateInterval != nil { - { - size, err := m.MaxUpdateInterval.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +// UnimplementedMetadataConfigServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetadataConfigServiceServer struct { } -func (m *AgentUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (*UnimplementedMetadataConfigServiceServer) UpdateConfig(ctx context.Context, req *UpdateConfigRequest) (*UpdateConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateConfig not implemented") } -func (m *AgentUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func RegisterMetadataConfigServiceServer(s *grpc.Server, srv MetadataConfigServiceServer) { + s.RegisterService(&_MetadataConfigService_serviceDesc, srv) } -func (m *AgentUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Update != nil { - { - size := m.Update.Size() - i -= size - if _, err := m.Update.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } +func _MetadataConfigService_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err } - if m.AgentID != nil { - { - size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if interceptor == nil { + return srv.(MetadataConfigServiceServer).UpdateConfig(ctx, in) } - return len(dAtA) - i, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.MetadataConfigService/UpdateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetadataConfigServiceServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *AgentUpdate_Deleted) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var _MetadataConfigService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "px.vizier.services.metadata.MetadataConfigService", + HandlerType: (*MetadataConfigServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateConfig", + Handler: _MetadataConfigService_UpdateConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "src/vizier/services/metadata/metadatapb/service.proto", } -func (m *AgentUpdate_Deleted) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i-- - if m.Deleted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *AgentUpdate_Agent) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// CronScriptStoreServiceClient is the client API for CronScriptStoreService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type CronScriptStoreServiceClient interface { + GetScripts(ctx context.Context, in *GetScriptsRequest, opts ...grpc.CallOption) (*GetScriptsResponse, error) + AddOrUpdateScript(ctx context.Context, in *AddOrUpdateScriptRequest, opts ...grpc.CallOption) (*AddOrUpdateScriptResponse, error) + DeleteScript(ctx context.Context, in *DeleteScriptRequest, opts ...grpc.CallOption) (*DeleteScriptResponse, error) + SetScripts(ctx context.Context, in *SetScriptsRequest, opts ...grpc.CallOption) (*SetScriptsResponse, error) + RecordExecutionResult(ctx context.Context, in *RecordExecutionResultRequest, opts ...grpc.CallOption) (*RecordExecutionResultResponse, error) + GetAllExecutionResults(ctx context.Context, in *GetAllExecutionResultsRequest, opts ...grpc.CallOption) (*GetAllExecutionResultsResponse, error) } -func (m *AgentUpdate_Agent) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Agent != nil { - { - size, err := m.Agent.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil +type cronScriptStoreServiceClient struct { + cc *grpc.ClientConn } -func (m *AgentUpdate_DataInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func NewCronScriptStoreServiceClient(cc *grpc.ClientConn) CronScriptStoreServiceClient { + return &cronScriptStoreServiceClient{cc} } -func (m *AgentUpdate_DataInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DataInfo != nil { - { - size, err := m.DataInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 +func (c *cronScriptStoreServiceClient) GetScripts(ctx context.Context, in *GetScriptsRequest, opts ...grpc.CallOption) (*GetScriptsResponse, error) { + out := new(GetScriptsResponse) + err := 
c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/GetScripts", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *AgentUpdatesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + +func (c *cronScriptStoreServiceClient) AddOrUpdateScript(ctx context.Context, in *AddOrUpdateScriptRequest, opts ...grpc.CallOption) (*AddOrUpdateScriptResponse, error) { + out := new(AddOrUpdateScriptResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/AddOrUpdateScript", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *AgentUpdatesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *cronScriptStoreServiceClient) DeleteScript(ctx context.Context, in *DeleteScriptRequest, opts ...grpc.CallOption) (*DeleteScriptResponse, error) { + out := new(DeleteScriptResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/DeleteScript", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil } -func (m *AgentUpdatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EndOfVersion { - i-- - if m.EndOfVersion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.AgentSchemasUpdated { - i-- - if m.AgentSchemasUpdated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.AgentSchemas) > 0 { - for iNdEx := len(m.AgentSchemas) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AgentSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } +func (c *cronScriptStoreServiceClient) SetScripts(ctx context.Context, in *SetScriptsRequest, opts ...grpc.CallOption) (*SetScriptsResponse, error) { + out := new(SetScriptsResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/SetScripts", in, out, opts...) + if err != nil { + return nil, err } - if len(m.AgentUpdates) > 0 { - for iNdEx := len(m.AgentUpdates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AgentUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + return out, nil +} + +func (c *cronScriptStoreServiceClient) RecordExecutionResult(ctx context.Context, in *RecordExecutionResultRequest, opts ...grpc.CallOption) (*RecordExecutionResultResponse, error) { + out := new(RecordExecutionResultResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/RecordExecutionResult", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *WithPrefixKeyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *cronScriptStoreServiceClient) GetAllExecutionResults(ctx context.Context, in *GetAllExecutionResultsRequest, opts ...grpc.CallOption) (*GetAllExecutionResultsResponse, error) { + out := new(GetAllExecutionResultsResponse) + err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.CronScriptStoreService/GetAllExecutionResults", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *WithPrefixKeyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// CronScriptStoreServiceServer is the server API for CronScriptStoreService service. +type CronScriptStoreServiceServer interface { + GetScripts(context.Context, *GetScriptsRequest) (*GetScriptsResponse, error) + AddOrUpdateScript(context.Context, *AddOrUpdateScriptRequest) (*AddOrUpdateScriptResponse, error) + DeleteScript(context.Context, *DeleteScriptRequest) (*DeleteScriptResponse, error) + SetScripts(context.Context, *SetScriptsRequest) (*SetScriptsResponse, error) + RecordExecutionResult(context.Context, *RecordExecutionResultRequest) (*RecordExecutionResultResponse, error) + GetAllExecutionResults(context.Context, *GetAllExecutionResultsRequest) (*GetAllExecutionResultsResponse, error) } -func (m *WithPrefixKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Proto) > 0 { - i -= len(m.Proto) - copy(dAtA[i:], m.Proto) - i = encodeVarintService(dAtA, i, uint64(len(m.Proto))) - i-- - dAtA[i] = 0x12 +// UnimplementedCronScriptStoreServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedCronScriptStoreServiceServer struct { +} + +func (*UnimplementedCronScriptStoreServiceServer) GetScripts(ctx context.Context, req *GetScriptsRequest) (*GetScriptsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetScripts not implemented") +} +func (*UnimplementedCronScriptStoreServiceServer) AddOrUpdateScript(ctx context.Context, req *AddOrUpdateScriptRequest) (*AddOrUpdateScriptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddOrUpdateScript not implemented") +} +func (*UnimplementedCronScriptStoreServiceServer) DeleteScript(ctx context.Context, req *DeleteScriptRequest) (*DeleteScriptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteScript not implemented") +} +func (*UnimplementedCronScriptStoreServiceServer) SetScripts(ctx context.Context, req *SetScriptsRequest) (*SetScriptsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetScripts not implemented") +} +func (*UnimplementedCronScriptStoreServiceServer) RecordExecutionResult(ctx context.Context, req *RecordExecutionResultRequest) (*RecordExecutionResultResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecordExecutionResult not implemented") +} +func (*UnimplementedCronScriptStoreServiceServer) GetAllExecutionResults(ctx context.Context, req *GetAllExecutionResultsRequest) (*GetAllExecutionResultsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAllExecutionResults not implemented") +} + +func RegisterCronScriptStoreServiceServer(s *grpc.Server, srv CronScriptStoreServiceServer) { + s.RegisterService(&_CronScriptStoreService_serviceDesc, srv) +} + +func _CronScriptStoreService_GetScripts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetScriptsRequest) + if err := dec(in); err != nil { + return nil, err } - if 
len(m.Prefix) > 0 { - i -= len(m.Prefix) - copy(dAtA[i:], m.Prefix) - i = encodeVarintService(dAtA, i, uint64(len(m.Prefix))) - i-- - dAtA[i] = 0xa + if interceptor == nil { + return srv.(CronScriptStoreServiceServer).GetScripts(ctx, in) } - return len(dAtA) - i, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/GetScripts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CronScriptStoreServiceServer).GetScripts(ctx, req.(*GetScriptsRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *WithPrefixKeyResponse) Marshal() (dAtA []byte, err error) { +func _CronScriptStoreService_AddOrUpdateScript_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddOrUpdateScriptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CronScriptStoreServiceServer).AddOrUpdateScript(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/AddOrUpdateScript", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CronScriptStoreServiceServer).AddOrUpdateScript(ctx, req.(*AddOrUpdateScriptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CronScriptStoreService_DeleteScript_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteScriptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CronScriptStoreServiceServer).DeleteScript(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/DeleteScript", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(CronScriptStoreServiceServer).DeleteScript(ctx, req.(*DeleteScriptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CronScriptStoreService_SetScripts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetScriptsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CronScriptStoreServiceServer).SetScripts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/SetScripts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CronScriptStoreServiceServer).SetScripts(ctx, req.(*SetScriptsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CronScriptStoreService_RecordExecutionResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecordExecutionResultRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CronScriptStoreServiceServer).RecordExecutionResult(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/RecordExecutionResult", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CronScriptStoreServiceServer).RecordExecutionResult(ctx, req.(*RecordExecutionResultRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CronScriptStoreService_GetAllExecutionResults_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllExecutionResultsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(CronScriptStoreServiceServer).GetAllExecutionResults(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/px.vizier.services.metadata.CronScriptStoreService/GetAllExecutionResults", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CronScriptStoreServiceServer).GetAllExecutionResults(ctx, req.(*GetAllExecutionResultsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CronScriptStoreService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "px.vizier.services.metadata.CronScriptStoreService", + HandlerType: (*CronScriptStoreServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetScripts", + Handler: _CronScriptStoreService_GetScripts_Handler, + }, + { + MethodName: "AddOrUpdateScript", + Handler: _CronScriptStoreService_AddOrUpdateScript_Handler, + }, + { + MethodName: "DeleteScript", + Handler: _CronScriptStoreService_DeleteScript_Handler, + }, + { + MethodName: "SetScripts", + Handler: _CronScriptStoreService_SetScripts_Handler, + }, + { + MethodName: "RecordExecutionResult", + Handler: _CronScriptStoreService_RecordExecutionResult_Handler, + }, + { + MethodName: "GetAllExecutionResults", + Handler: _CronScriptStoreService_GetAllExecutionResults_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "src/vizier/services/metadata/metadatapb/service.proto", +} + +func (m *SchemaRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4910,34 +5417,20 @@ func (m *WithPrefixKeyResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WithPrefixKeyResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *SchemaRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WithPrefixKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SchemaRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Kvs) > 0 { - for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *WithPrefixKeyResponse_KV) Marshal() (dAtA []byte, err error) { +func (m *SchemaResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4947,34 +5440,32 @@ func (m *WithPrefixKeyResponse_KV) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WithPrefixKeyResponse_KV) MarshalTo(dAtA []byte) (int, error) { +func (m *SchemaResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WithPrefixKeyResponse_KV) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintService(dAtA, i, uint64(len(m.Value))) + if m.Schema != nil { + { + size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintService(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RegisterTracepointRequest) Marshal() (dAtA []byte, err error) { +func (m *AgentInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4984,34 +5475,20 @@ func (m *RegisterTracepointRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RegisterTracepointRequest) 
MarshalTo(dAtA []byte) (int, error) { +func (m *AgentInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Requests) > 0 { - for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *RegisterTracepointRequest_TracepointRequest) Marshal() (dAtA []byte, err error) { +func (m *AgentInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5021,38 +5498,80 @@ func (m *RegisterTracepointRequest_TracepointRequest) Marshal() (dAtA []byte, er return dAtA[:n], nil } -func (m *RegisterTracepointRequest_TracepointRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *AgentInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointRequest_TracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.TTL != nil { - { - size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } + if len(m.Info) > 0 { + for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AgentMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AgentMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CarnotInfo != nil { + { + size, err := m.CarnotInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } i -= size i = encodeVarintService(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - if m.TracepointDeployment != nil { + if m.Agent != nil { { - size, err := m.TracepointDeployment.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Agent.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5065,7 +5584,7 @@ func (m *RegisterTracepointRequest_TracepointRequest) MarshalToSizedBuffer(dAtA return len(dAtA) - i, nil } -func (m *RegisterTracepointResponse) Marshal() (dAtA []byte, err error) { +func (m *AgentUpdatesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5075,19 +5594,24 @@ func (m *RegisterTracepointResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RegisterTracepointResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *AgentUpdatesRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentUpdatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { + if m.MaxUpdatesPerResponse != 0 { + i = encodeVarintService(dAtA, i, uint64(m.MaxUpdatesPerResponse)) + i-- + dAtA[i] = 0x10 + } + if m.MaxUpdateInterval != nil { { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MaxUpdateInterval.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5095,26 +5619,12 @@ func (m *RegisterTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, err i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - if len(m.Tracepoints) > 0 { - for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RegisterTracepointResponse_TracepointStatus) Marshal() (dAtA []byte, err error) { +func (m *AgentUpdate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5124,26 +5634,28 @@ func (m *RegisterTracepointResponse_TracepointStatus) Marshal() (dAtA []byte, er return dAtA[:n], nil } -func (m *RegisterTracepointResponse_TracepointStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *AgentUpdate) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointResponse_TracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a + if m.Update != nil { + { + size := m.Update.Size() + i -= size + if _, err := m.Update.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - 
if m.ID != nil { + if m.AgentID != nil { { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5151,11 +5663,38 @@ func (m *RegisterTracepointResponse_TracepointStatus) MarshalToSizedBuffer(dAtA i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } - if m.Status != nil { + return len(dAtA) - i, nil +} + +func (m *AgentUpdate_Deleted) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AgentUpdate_Deleted) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.Deleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *AgentUpdate_Agent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AgentUpdate_Agent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Agent != nil { { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Agent.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5163,12 +5702,32 @@ func (m *RegisterTracepointResponse_TracepointStatus) MarshalToSizedBuffer(dAtA i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } return len(dAtA) - i, nil } +func (m *AgentUpdate_DataInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} -func (m *GetTracepointInfoRequest) Marshal() (dAtA []byte, err error) { +func (m *AgentUpdate_DataInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DataInfo != nil { + { + size, err := m.DataInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AgentUpdatesResponse) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5178,20 +5737,54 @@ func (m *GetTracepointInfoRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTracepointInfoRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *AgentUpdatesResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentUpdatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.IDs) > 0 { - for iNdEx := len(m.IDs) - 1; iNdEx >= 0; iNdEx-- { + if m.EndOfVersion { + i-- + if m.EndOfVersion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.AgentSchemasUpdated { + i-- + if m.AgentSchemasUpdated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.AgentSchemas) > 0 { + for iNdEx := len(m.AgentSchemas) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.IDs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.AgentSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.AgentUpdates) > 0 { + for iNdEx := len(m.AgentUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AgentUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5205,7 +5798,7 @@ func (m *GetTracepointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *GetTracepointInfoResponse) Marshal() (dAtA []byte, err error) { +func (m *WithPrefixKeyRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5215,34 +5808,34 @@ func (m *GetTracepointInfoResponse) Marshal() (dAtA []byte, err error) { return 
dAtA[:n], nil } -func (m *GetTracepointInfoResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *WithPrefixKeyRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *WithPrefixKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Tracepoints) > 0 { - for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if len(m.Proto) > 0 { + i -= len(m.Proto) + copy(dAtA[i:], m.Proto) + i = encodeVarintService(dAtA, i, uint64(len(m.Proto))) + i-- + dAtA[i] = 0x12 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintService(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTracepointInfoResponse_TracepointState) Marshal() (dAtA []byte, err error) { +func (m *WithPrefixKeyResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5252,41 +5845,20 @@ func (m *GetTracepointInfoResponse_TracepointState) Marshal() (dAtA []byte, err return dAtA[:n], nil } -func (m *GetTracepointInfoResponse_TracepointState) MarshalTo(dAtA []byte) (int, error) { +func (m *WithPrefixKeyResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoResponse_TracepointState) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *WithPrefixKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.SchemaNames) > 0 { - for iNdEx := len(m.SchemaNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SchemaNames[iNdEx]) 
- copy(dAtA[i:], m.SchemaNames[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.SchemaNames[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.ExpectedState != 0 { - i = encodeVarintService(dAtA, i, uint64(m.ExpectedState)) - i-- - dAtA[i] = 0x28 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Kvs) > 0 { + for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5294,30 +5866,13 @@ func (m *GetTracepointInfoResponse_TracepointState) MarshalToSizedBuffer(dAtA [] i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - } - if m.State != 0 { - i = encodeVarintService(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RemoveTracepointRequest) Marshal() (dAtA []byte, err error) { +func (m *WithPrefixKeyResponse_KV) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5327,29 +5882,34 @@ func (m *RemoveTracepointRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveTracepointRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *WithPrefixKeyResponse_KV) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RemoveTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *WithPrefixKeyResponse_KV) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i 
var l int _ = l - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintService(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintService(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RemoveTracepointResponse) Marshal() (dAtA []byte, err error) { +func (m *RegisterFileSourceRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5359,32 +5919,34 @@ func (m *RemoveTracepointResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveTracepointResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RemoveTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { +func (m *RegisterFileSourceResponse) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5394,41 +5956,46 @@ func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterFileSourceResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UpdateConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.AgentPodName) > 0 { - i -= len(m.AgentPodName) - copy(dAtA[i:], m.AgentPodName) - i = encodeVarintService(dAtA, i, uint64(len(m.AgentPodName))) - i-- - dAtA[i] = 0x1a - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintService(dAtA, i, uint64(len(m.Value))) + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintService(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if len(m.FileSources) > 0 { + for iNdEx := len(m.FileSources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FileSources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { +func (m *RegisterFileSourceResponse_FileSourceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5438,16 +6005,35 @@ func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateConfigResponse) MarshalTo(dAtA 
[]byte) (int, error) { +func (m *RegisterFileSourceResponse_FileSourceStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UpdateConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterFileSourceResponse_FileSourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.Status != nil { { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) @@ -5463,7 +6049,7 @@ func (m *UpdateConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *GetScriptsRequest) Marshal() (dAtA []byte, err error) { +func (m *GetFileSourceInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5473,20 +6059,34 @@ func (m *GetScriptsRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetFileSourceInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetFileSourceInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.IDs) > 0 { + for iNdEx := len(m.IDs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IDs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil 
} -func (m *GetScriptsResponse) Marshal() (dAtA []byte, err error) { +func (m *GetFileSourceInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5496,38 +6096,26 @@ func (m *GetScriptsResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetFileSourceInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetFileSourceInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Scripts) > 0 { - for k := range m.Scripts { - v := m.Scripts[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + if len(m.FileSources) > 0 { + for iNdEx := len(m.FileSources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FileSources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintService(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintService(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0xa } @@ -5535,7 +6123,7 @@ func (m *GetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *AddOrUpdateScriptRequest) Marshal() (dAtA []byte, err error) { +func (m *GetFileSourceInfoResponse_FileSourceState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5545,19 +6133,59 @@ func (m *AddOrUpdateScriptRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*AddOrUpdateScriptRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetFileSourceInfoResponse_FileSourceState) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AddOrUpdateScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetFileSourceInfoResponse_FileSourceState) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Script != nil { + if len(m.SchemaNames) > 0 { + for iNdEx := len(m.SchemaNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SchemaNames[iNdEx]) + copy(dAtA[i:], m.SchemaNames[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.SchemaNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.ExpectedState != 0 { + i = encodeVarintService(dAtA, i, uint64(m.ExpectedState)) + i-- + dAtA[i] = 0x28 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if len(m.Statuses) > 0 { + for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.State != 0 { + i = encodeVarintService(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if m.ID != nil { { - size, err := m.Script.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5570,7 +6198,7 @@ func (m *AddOrUpdateScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *AddOrUpdateScriptResponse) Marshal() (dAtA []byte, err error) { +func (m *RemoveFileSourceRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5580,22 +6208,31 @@ func (m *AddOrUpdateScriptResponse) Marshal() (dAtA []byte, err error) 
{ return dAtA[:n], nil } -func (m *AddOrUpdateScriptResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *RemoveFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AddOrUpdateScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - return len(dAtA) - i, nil -} - -func (m *DeleteScriptRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RemoveFileSourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err @@ -5603,19 +6240,19 @@ func (m *DeleteScriptRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeleteScriptRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RemoveFileSourceResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DeleteScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RemoveFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ScriptID != nil { + if m.Status != nil { { - size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5628,30 +6265,7 @@ func (m *DeleteScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DeleteScriptResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteScriptResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *SetScriptsRequest) Marshal() (dAtA []byte, err error) { +func (m *RegisterTracepointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5661,38 +6275,26 @@ func (m *SetScriptsRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Scripts) > 0 { - for k := range m.Scripts { - v := m.Scripts[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintService(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintService(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0xa } @@ -5700,30 +6302,7 @@ func (m *SetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 
len(dAtA) - i, nil } -func (m *SetScriptsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ExecutionStats) Marshal() (dAtA []byte, err error) { +func (m *RegisterTracepointRequest_TracepointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5733,40 +6312,51 @@ func (m *ExecutionStats) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExecutionStats) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointRequest_TracepointRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointRequest_TracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RecordsProcessed != 0 { - i = encodeVarintService(dAtA, i, uint64(m.RecordsProcessed)) - i-- - dAtA[i] = 0x20 - } - if m.BytesProcessed != 0 { - i = encodeVarintService(dAtA, i, uint64(m.BytesProcessed)) + if m.TTL != nil { + { + size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x18 + dAtA[i] = 0x1a } - if m.CompilationTimeNs != 0 { - i = encodeVarintService(dAtA, i, uint64(m.CompilationTimeNs)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 
0x10 + dAtA[i] = 0x12 } - if m.ExecutionTimeNs != 0 { - i = encodeVarintService(dAtA, i, uint64(m.ExecutionTimeNs)) + if m.TracepointDeployment != nil { + { + size, err := m.TracepointDeployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RecordExecutionResultRequest) Marshal() (dAtA []byte, err error) { +func (m *RegisterTracepointResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5776,28 +6366,19 @@ func (m *RecordExecutionResultRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RecordExecutionResultRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Result != nil { - { - size := m.Result.Size() - i -= size - if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Timestamp != nil { + if m.Status != nil { { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5807,31 +6388,53 @@ func (m *RecordExecutionResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, e i-- dAtA[i] = 0x12 } - if m.ScriptID != nil { - { - size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Tracepoints) > 0 { + for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintService(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RecordExecutionResultRequest_Error) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse_TracepointStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegisterTracepointResponse_TracepointStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultRequest_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse_TracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Error != nil { + _ = i + var l int + _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.ID != nil { { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5839,20 +6442,11 @@ func (m *RecordExecutionResultRequest_Error) MarshalToSizedBuffer(dAtA []byte) ( i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } - return len(dAtA) - i, nil -} -func (m *RecordExecutionResultRequest_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RecordExecutionResultRequest_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ExecutionStats != nil { + if m.Status != nil { { - size, err := m.ExecutionStats.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5860,11 +6454,12 @@ func (m 
*RecordExecutionResultRequest_ExecutionStats) MarshalToSizedBuffer(dAtA i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RecordExecutionResultResponse) Marshal() (dAtA []byte, err error) { + +func (m *GetTracepointInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5874,20 +6469,34 @@ func (m *RecordExecutionResultResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RecordExecutionResultResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetTracepointInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetTracepointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.IDs) > 0 { + for iNdEx := len(m.IDs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.IDs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *GetAllExecutionResultsRequest) Marshal() (dAtA []byte, err error) { +func (m *GetTracepointInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5897,20 +6506,34 @@ func (m *GetAllExecutionResultsRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetAllExecutionResultsRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetAllExecutionResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.Tracepoints) > 0 { + for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *GetAllExecutionResultsResponse) Marshal() (dAtA []byte, err error) { +func (m *GetTracepointInfoResponse_TracepointState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5920,20 +6543,41 @@ func (m *GetAllExecutionResultsResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetAllExecutionResultsResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse_TracepointState) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetAllExecutionResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse_TracepointState) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + if len(m.SchemaNames) > 0 { + for iNdEx := len(m.SchemaNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SchemaNames[iNdEx]) + copy(dAtA[i:], m.SchemaNames[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.SchemaNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.ExpectedState != 0 { + i = encodeVarintService(dAtA, i, uint64(m.ExpectedState)) + i-- + dAtA[i] = 0x28 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if len(m.Statuses) > 0 { + for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { { - size, err := 
m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5941,13 +6585,30 @@ func (m *GetAllExecutionResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + } + } + if m.State != 0 { + i = encodeVarintService(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetAllExecutionResultsResponse_ExecutionResult) Marshal() (dAtA []byte, err error) { +func (m *RemoveTracepointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5957,40 +6618,51 @@ func (m *GetAllExecutionResultsResponse_ExecutionResult) Marshal() (dAtA []byte, return dAtA[:n], nil } -func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalTo(dAtA []byte) (int, error) { +func (m *RemoveTracepointRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RemoveTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Result != nil { - { - size := m.Result.Size() - i -= size - if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if m.Timestamp != nil { - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 
0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *RemoveTracepointResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.ScriptID != nil { + return dAtA[:n], nil +} + +func (m *RemoveTracepointResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != nil { { - size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6003,37 +6675,73 @@ func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalToSizedBuffer(dA return len(dAtA) - i, nil } -func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalTo(dAtA []byte) (int, error) { +func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UpdateConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Error != nil { - { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } + _ = i + var l int + _ = l + if len(m.AgentPodName) > 0 { + i -= len(m.AgentPodName) + copy(dAtA[i:], m.AgentPodName) + i = encodeVarintService(dAtA, i, 
uint64(len(m.AgentPodName))) i-- dAtA[i] = 0x1a } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintService(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintService(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { + +func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UpdateConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.ExecutionStats != nil { + _ = i + var l int + _ = l + if m.Status != nil { { - size, err := m.ExecutionStats.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6041,613 +6749,627 @@ func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalT i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func encodeVarintService(dAtA []byte, offset int, v uint64) int { - offset -= sovService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + +func (m *GetScriptsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m 
*SchemaRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n + +func (m *GetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SchemaResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *GetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Schema != nil { - l = m.Schema.Size() - n += 1 + l + sovService(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *AgentInfoRequest) Size() (n int) { - if m == nil { - return 0 +func (m *GetScriptsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - return n + return dAtA[:n], nil } -func (m *AgentInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n +func (m *GetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AgentMetadata) Size() (n int) { - if m == nil { - return 0 - } +func (m *GetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Agent != nil { - l = m.Agent.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.CarnotInfo != nil { - l = m.CarnotInfo.Size() - n += 1 + l + sovService(uint64(l)) + if len(m.Scripts) > 0 { + for k := range m.Scripts { + v := m.Scripts[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + 
copy(dAtA[i:], k) + i = encodeVarintService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *AgentUpdatesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUpdateInterval != nil { - l = m.MaxUpdateInterval.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.MaxUpdatesPerResponse != 0 { - n += 1 + sovService(uint64(m.MaxUpdatesPerResponse)) +func (m *AddOrUpdateScriptRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *AgentUpdate) Size() (n int) { - if m == nil { - return 0 - } +func (m *AddOrUpdateScriptRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AddOrUpdateScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.AgentID != nil { - l = m.AgentID.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.Update != nil { - n += m.Update.Size() + if m.Script != nil { + { + size, err := m.Script.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *AgentUpdate_Deleted) Size() (n int) { - if m == nil { - return 0 +func (m *AddOrUpdateScriptResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - n += 2 - return n + return dAtA[:n], nil } -func (m *AgentUpdate_Agent) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Agent != nil { - l = m.Agent.Size() - n += 1 + l + sovService(uint64(l)) 
- } - return n + +func (m *AddOrUpdateScriptResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AgentUpdate_DataInfo) Size() (n int) { - if m == nil { - return 0 - } + +func (m *AddOrUpdateScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.DataInfo != nil { - l = m.DataInfo.Size() - n += 1 + l + sovService(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *AgentUpdatesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AgentUpdates) > 0 { - for _, e := range m.AgentUpdates { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - if len(m.AgentSchemas) > 0 { - for _, e := range m.AgentSchemas { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - if m.AgentSchemasUpdated { - n += 2 - } - if m.EndOfVersion { - n += 2 + +func (m *DeleteScriptRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *WithPrefixKeyRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Prefix) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Proto) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n +func (m *DeleteScriptRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WithPrefixKeyResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *DeleteScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Kvs) > 0 { - for _, e := range m.Kvs { - l = e.Size() - n += 1 + l + sovService(uint64(l)) + if m.ScriptID != nil { + { + size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintService(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *WithPrefixKeyResponse_KV) Size() (n int) { - if m == nil { - return 0 +func (m *DeleteScriptResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *DeleteScriptResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *RegisterTracepointRequest) Size() (n int) { - if m == nil { - return 0 +func (m *SetScriptsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - if len(m.Requests) > 0 { - for _, e := range m.Requests { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n + return dAtA[:n], nil } -func (m *RegisterTracepointRequest_TracepointRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TracepointDeployment != nil { - l = m.TracepointDeployment.Size() - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.TTL != nil { - l = m.TTL.Size() - n += 1 + l + sovService(uint64(l)) - } - return n +func (m *SetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *SetScriptsRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Tracepoints) > 0 { - for _, e := range m.Tracepoints { - l = e.Size() - n += 1 + l + sovService(uint64(l)) + if len(m.Scripts) > 0 { + for k := range m.Scripts { + v := m.Scripts[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *RegisterTracepointResponse_TracepointStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) +func (m *SetScriptsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *GetTracepointInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.IDs) > 0 { - for _, e := range m.IDs { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n +func (m *SetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *SetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Tracepoints) > 0 { - for _, 
e := range m.Tracepoints { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n + return len(dAtA) - i, nil } -func (m *GetTracepointInfoResponse_TracepointState) Size() (n int) { - if m == nil { - return 0 +func (m *ExecutionStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ExecutionStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.State != 0 { - n += 1 + sovService(uint64(m.State)) - } - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } + if m.RecordsProcessed != 0 { + i = encodeVarintService(dAtA, i, uint64(m.RecordsProcessed)) + i-- + dAtA[i] = 0x20 } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) + if m.BytesProcessed != 0 { + i = encodeVarintService(dAtA, i, uint64(m.BytesProcessed)) + i-- + dAtA[i] = 0x18 } - if m.ExpectedState != 0 { - n += 1 + sovService(uint64(m.ExpectedState)) + if m.CompilationTimeNs != 0 { + i = encodeVarintService(dAtA, i, uint64(m.CompilationTimeNs)) + i-- + dAtA[i] = 0x10 } - if len(m.SchemaNames) > 0 { - for _, s := range m.SchemaNames { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } + if m.ExecutionTimeNs != 0 { + i = encodeVarintService(dAtA, i, uint64(m.ExecutionTimeNs)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *RemoveTracepointRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } +func (m *RecordExecutionResultRequest) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *RemoveTracepointResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - return n +func (m *RecordExecutionResultRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UpdateConfigRequest) Size() (n int) { - if m == nil { - return 0 - } +func (m *RecordExecutionResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovService(uint64(l)) + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovService(uint64(l)) + if m.Timestamp != nil { + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - l = len(m.AgentPodName) - if l > 0 { - n += 1 + l + sovService(uint64(l)) + if m.ScriptID != nil { + { + size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *UpdateConfigResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - return n +func (m *RecordExecutionResultRequest_Error) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetScriptsRequest) Size() (n int) { - if m == nil { - return 0 
+func (m *RecordExecutionResultRequest_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - var l int - _ = l - return n + return len(dAtA) - i, nil +} +func (m *RecordExecutionResultRequest_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetScriptsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Scripts) > 0 { - for k, v := range m.Scripts { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovService(uint64(l)) +func (m *RecordExecutionResultRequest_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExecutionStats != nil { + { + size, err := m.ExecutionStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - mapEntrySize := 1 + len(k) + sovService(uint64(len(k))) + l - n += mapEntrySize + 1 + sovService(uint64(mapEntrySize)) + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - return n + return len(dAtA) - i, nil } - -func (m *AddOrUpdateScriptRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Script != nil { - l = m.Script.Size() - n += 1 + l + sovService(uint64(l)) +func (m *RecordExecutionResultResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *AddOrUpdateScriptResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func (m *RecordExecutionResultResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*DeleteScriptRequest) Size() (n int) { - if m == nil { - return 0 - } +func (m *RecordExecutionResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.ScriptID != nil { - l = m.ScriptID.Size() - n += 1 + l + sovService(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *DeleteScriptResponse) Size() (n int) { - if m == nil { - return 0 +func (m *GetAllExecutionResultsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *GetAllExecutionResultsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetAllExecutionResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - return n + return len(dAtA) - i, nil } -func (m *SetScriptsRequest) Size() (n int) { - if m == nil { - return 0 +func (m *GetAllExecutionResultsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *GetAllExecutionResultsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetAllExecutionResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Scripts) > 0 { - for k, v := range m.Scripts { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovService(uint64(l)) + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - mapEntrySize := 1 + len(k) + 
sovService(uint64(len(k))) + l - n += mapEntrySize + 1 + sovService(uint64(mapEntrySize)) + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *SetScriptsResponse) Size() (n int) { - if m == nil { - return 0 +func (m *GetAllExecutionResultsResponse_ExecutionResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - return n + return dAtA[:n], nil } -func (m *ExecutionStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExecutionTimeNs != 0 { - n += 1 + sovService(uint64(m.ExecutionTimeNs)) - } - if m.CompilationTimeNs != 0 { - n += 1 + sovService(uint64(m.CompilationTimeNs)) - } - if m.BytesProcessed != 0 { - n += 1 + sovService(uint64(m.BytesProcessed)) - } - if m.RecordsProcessed != 0 { - n += 1 + sovService(uint64(m.RecordsProcessed)) - } - return n +func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultRequest) Size() (n int) { - if m == nil { - return 0 - } +func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.ScriptID != nil { - l = m.ScriptID.Size() - n += 1 + l + sovService(uint64(l)) + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } if m.Timestamp != nil { - l = m.Timestamp.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.Result != nil { - n += m.Result.Size() + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return n + if m.ScriptID != nil { + { + size, err := 
m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RecordExecutionResultRequest_Error) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.Error != nil { - l = m.Error.Size() - n += 1 + l + sovService(uint64(l)) + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return n + return len(dAtA) - i, nil } -func (m *RecordExecutionResultRequest_ExecutionStats) Size() (n int) { +func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExecutionStats != nil { + { + size, err := m.ExecutionStats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SchemaRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.ExecutionStats != nil { - l = m.ExecutionStats.Size() - n += 1 + l + sovService(uint64(l)) - } return n 
} -func (m *RecordExecutionResultResponse) Size() (n int) { + +func (m *SchemaResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovService(uint64(l)) + } return n } -func (m *GetAllExecutionResultsRequest) Size() (n int) { +func (m *AgentInfoRequest) Size() (n int) { if m == nil { return 0 } @@ -6656,14 +7378,14 @@ func (m *GetAllExecutionResultsRequest) Size() (n int) { return n } -func (m *GetAllExecutionResultsResponse) Size() (n int) { +func (m *AgentInfoResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Results) > 0 { - for _, e := range m.Results { + if len(m.Info) > 0 { + for _, e := range m.Info { l = e.Size() n += 1 + l + sovService(uint64(l)) } @@ -6671,586 +7393,1949 @@ func (m *GetAllExecutionResultsResponse) Size() (n int) { return n } -func (m *GetAllExecutionResultsResponse_ExecutionResult) Size() (n int) { +func (m *AgentMetadata) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.ScriptID != nil { - l = m.ScriptID.Size() + if m.Agent != nil { + l = m.Agent.Size() n += 1 + l + sovService(uint64(l)) } - if m.Timestamp != nil { - l = m.Timestamp.Size() + if m.Status != nil { + l = m.Status.Size() n += 1 + l + sovService(uint64(l)) } - if m.Result != nil { - n += m.Result.Size() + if m.CarnotInfo != nil { + l = m.CarnotInfo.Size() + n += 1 + l + sovService(uint64(l)) } return n } -func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) Size() (n int) { +func (m *AgentUpdatesRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Error != nil { - l = m.Error.Size() + if m.MaxUpdateInterval != nil { + l = m.MaxUpdateInterval.Size() n += 1 + l + sovService(uint64(l)) } + if m.MaxUpdatesPerResponse != 0 { + n += 1 + sovService(uint64(m.MaxUpdatesPerResponse)) + } return n } -func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) Size() (n int) { + +func (m *AgentUpdate) Size() (n int) { if m == 
nil { return 0 } var l int _ = l - if m.ExecutionStats != nil { - l = m.ExecutionStats.Size() + if m.AgentID != nil { + l = m.AgentID.Size() n += 1 + l + sovService(uint64(l)) } + if m.Update != nil { + n += m.Update.Size() + } return n } -func sovService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozService(x uint64) (n int) { - return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *SchemaRequest) String() string { - if this == nil { - return "nil" +func (m *AgentUpdate_Deleted) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SchemaRequest{`, - `}`, - }, "") - return s + var l int + _ = l + n += 2 + return n } -func (this *SchemaResponse) String() string { - if this == nil { - return "nil" +func (m *AgentUpdate_Agent) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&SchemaResponse{`, - `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "Schema", "schemapb.Schema", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AgentInfoRequest) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Agent != nil { + l = m.Agent.Size() + n += 1 + l + sovService(uint64(l)) } - s := strings.Join([]string{`&AgentInfoRequest{`, - `}`, - }, "") - return s + return n } -func (this *AgentInfoResponse) String() string { - if this == nil { - return "nil" +func (m *AgentUpdate_DataInfo) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForInfo := "[]*AgentMetadata{" - for _, f := range this.Info { - repeatedStringForInfo += strings.Replace(f.String(), "AgentMetadata", "AgentMetadata", 1) + "," + var l int + _ = l + if m.DataInfo != nil { + l = m.DataInfo.Size() + n += 1 + l + sovService(uint64(l)) } - repeatedStringForInfo += "}" - s := strings.Join([]string{`&AgentInfoResponse{`, - `Info:` + repeatedStringForInfo + `,`, - `}`, - }, "") - return s + return n } -func (this *AgentMetadata) String() string { - if this == nil { - return 
"nil" +func (m *AgentUpdatesResponse) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AgentMetadata{`, - `Agent:` + strings.Replace(fmt.Sprintf("%v", this.Agent), "Agent", "agentpb.Agent", 1) + `,`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "AgentStatus", "agentpb.AgentStatus", 1) + `,`, - `CarnotInfo:` + strings.Replace(fmt.Sprintf("%v", this.CarnotInfo), "CarnotInfo", "distributedpb.CarnotInfo", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AgentUpdatesRequest) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.AgentUpdates) > 0 { + for _, e := range m.AgentUpdates { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - s := strings.Join([]string{`&AgentUpdatesRequest{`, - `MaxUpdateInterval:` + strings.Replace(fmt.Sprintf("%v", this.MaxUpdateInterval), "Duration", "types.Duration", 1) + `,`, - `MaxUpdatesPerResponse:` + fmt.Sprintf("%v", this.MaxUpdatesPerResponse) + `,`, - `}`, - }, "") - return s -} -func (this *AgentUpdate) String() string { - if this == nil { - return "nil" + if len(m.AgentSchemas) > 0 { + for _, e := range m.AgentSchemas { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - s := strings.Join([]string{`&AgentUpdate{`, - `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "UUID", "uuidpb.UUID", 1) + `,`, - `Update:` + fmt.Sprintf("%v", this.Update) + `,`, - `}`, - }, "") - return s -} -func (this *AgentUpdate_Deleted) String() string { - if this == nil { - return "nil" + if m.AgentSchemasUpdated { + n += 2 } - s := strings.Join([]string{`&AgentUpdate_Deleted{`, - `Deleted:` + fmt.Sprintf("%v", this.Deleted) + `,`, - `}`, - }, "") - return s -} -func (this *AgentUpdate_Agent) String() string { - if this == nil { - return "nil" + if m.EndOfVersion { + n += 2 } - s := strings.Join([]string{`&AgentUpdate_Agent{`, - `Agent:` + strings.Replace(fmt.Sprintf("%v", this.Agent), "Agent", "agentpb.Agent", 1) + `,`, - `}`, - }, "") - return s -} 
-func (this *AgentUpdate_DataInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AgentUpdate_DataInfo{`, - `DataInfo:` + strings.Replace(fmt.Sprintf("%v", this.DataInfo), "AgentDataInfo", "messagespb.AgentDataInfo", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *AgentUpdatesResponse) String() string { - if this == nil { - return "nil" + +func (m *WithPrefixKeyRequest) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForAgentUpdates := "[]*AgentUpdate{" - for _, f := range this.AgentUpdates { - repeatedStringForAgentUpdates += strings.Replace(f.String(), "AgentUpdate", "AgentUpdate", 1) + "," + var l int + _ = l + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovService(uint64(l)) } - repeatedStringForAgentUpdates += "}" - repeatedStringForAgentSchemas := "[]*SchemaInfo{" - for _, f := range this.AgentSchemas { - repeatedStringForAgentSchemas += strings.Replace(fmt.Sprintf("%v", f), "SchemaInfo", "distributedpb.SchemaInfo", 1) + "," + l = len(m.Proto) + if l > 0 { + n += 1 + l + sovService(uint64(l)) } - repeatedStringForAgentSchemas += "}" - s := strings.Join([]string{`&AgentUpdatesResponse{`, - `AgentUpdates:` + repeatedStringForAgentUpdates + `,`, - `AgentSchemas:` + repeatedStringForAgentSchemas + `,`, - `AgentSchemasUpdated:` + fmt.Sprintf("%v", this.AgentSchemasUpdated) + `,`, - `EndOfVersion:` + fmt.Sprintf("%v", this.EndOfVersion) + `,`, - `}`, - }, "") - return s + return n } -func (this *WithPrefixKeyRequest) String() string { - if this == nil { - return "nil" + +func (m *WithPrefixKeyResponse) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&WithPrefixKeyRequest{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `Proto:` + fmt.Sprintf("%v", this.Proto) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Kvs) > 0 { + for _, e := range m.Kvs { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + return n } -func (this 
*WithPrefixKeyResponse) String() string { - if this == nil { - return "nil" + +func (m *WithPrefixKeyResponse_KV) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForKvs := "[]*WithPrefixKeyResponse_KV{" - for _, f := range this.Kvs { - repeatedStringForKvs += strings.Replace(fmt.Sprintf("%v", f), "WithPrefixKeyResponse_KV", "WithPrefixKeyResponse_KV", 1) + "," + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovService(uint64(l)) } - repeatedStringForKvs += "}" - s := strings.Join([]string{`&WithPrefixKeyResponse{`, - `Kvs:` + repeatedStringForKvs + `,`, - `}`, - }, "") - return s -} -func (this *WithPrefixKeyResponse_KV) String() string { - if this == nil { - return "nil" + l = len(m.Value) + if l > 0 { + n += 1 + l + sovService(uint64(l)) } - s := strings.Join([]string{`&WithPrefixKeyResponse_KV{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s + return n } -func (this *RegisterTracepointRequest) String() string { - if this == nil { - return "nil" + +func (m *RegisterFileSourceRequest) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRequests := "[]*RegisterTracepointRequest_TracepointRequest{" - for _, f := range this.Requests { - repeatedStringForRequests += strings.Replace(fmt.Sprintf("%v", f), "RegisterTracepointRequest_TracepointRequest", "RegisterTracepointRequest_TracepointRequest", 1) + "," + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - repeatedStringForRequests += "}" - s := strings.Join([]string{`&RegisterTracepointRequest{`, - `Requests:` + repeatedStringForRequests + `,`, - `}`, - }, "") - return s + return n } -func (this *RegisterTracepointRequest_TracepointRequest) String() string { - if this == nil { - return "nil" + +func (m *RegisterFileSourceResponse) Size() (n int) { + if m == nil { + return 0 } - s := 
strings.Join([]string{`&RegisterTracepointRequest_TracepointRequest{`, - `TracepointDeployment:` + strings.Replace(fmt.Sprintf("%v", this.TracepointDeployment), "TracepointDeployment", "logicalpb.TracepointDeployment", 1) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TTL:` + strings.Replace(fmt.Sprintf("%v", this.TTL), "Duration", "types.Duration", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RegisterTracepointResponse) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.FileSources) > 0 { + for _, e := range m.FileSources { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - repeatedStringForTracepoints := "[]*RegisterTracepointResponse_TracepointStatus{" - for _, f := range this.Tracepoints { - repeatedStringForTracepoints += strings.Replace(fmt.Sprintf("%v", f), "RegisterTracepointResponse_TracepointStatus", "RegisterTracepointResponse_TracepointStatus", 1) + "," + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) } - repeatedStringForTracepoints += "}" - s := strings.Join([]string{`&RegisterTracepointResponse{`, - `Tracepoints:` + repeatedStringForTracepoints + `,`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *RegisterTracepointResponse_TracepointStatus) String() string { - if this == nil { - return "nil" + +func (m *RegisterFileSourceResponse_FileSourceStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&RegisterTracepointResponse_TracepointStatus{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *GetTracepointInfoRequest) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Status 
!= nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) } - repeatedStringForIDs := "[]*UUID{" - for _, f := range this.IDs { - repeatedStringForIDs += strings.Replace(fmt.Sprintf("%v", f), "UUID", "uuidpb.UUID", 1) + "," + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovService(uint64(l)) } - repeatedStringForIDs += "}" - s := strings.Join([]string{`&GetTracepointInfoRequest{`, - `IDs:` + repeatedStringForIDs + `,`, - `}`, - }, "") - return s + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n } -func (this *GetTracepointInfoResponse) String() string { - if this == nil { - return "nil" + +func (m *GetFileSourceInfoRequest) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForTracepoints := "[]*GetTracepointInfoResponse_TracepointState{" - for _, f := range this.Tracepoints { - repeatedStringForTracepoints += strings.Replace(fmt.Sprintf("%v", f), "GetTracepointInfoResponse_TracepointState", "GetTracepointInfoResponse_TracepointState", 1) + "," + var l int + _ = l + if len(m.IDs) > 0 { + for _, e := range m.IDs { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - repeatedStringForTracepoints += "}" - s := strings.Join([]string{`&GetTracepointInfoResponse{`, - `Tracepoints:` + repeatedStringForTracepoints + `,`, - `}`, - }, "") - return s + return n } -func (this *GetTracepointInfoResponse_TracepointState) String() string { - if this == nil { - return "nil" + +func (m *GetFileSourceInfoResponse) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForStatuses := "[]*Status{" - for _, f := range this.Statuses { - repeatedStringForStatuses += strings.Replace(fmt.Sprintf("%v", f), "Status", "statuspb.Status", 1) + "," + var l int + _ = l + if len(m.FileSources) > 0 { + for _, e := range m.FileSources { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - repeatedStringForStatuses += "}" - s := strings.Join([]string{`&GetTracepointInfoResponse_TracepointState{`, - `ID:` + 
strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `Statuses:` + repeatedStringForStatuses + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ExpectedState:` + fmt.Sprintf("%v", this.ExpectedState) + `,`, - `SchemaNames:` + fmt.Sprintf("%v", this.SchemaNames) + `,`, - `}`, - }, "") - return s + return n } -func (this *RemoveTracepointRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveTracepointRequest{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveTracepointResponse) String() string { - if this == nil { - return "nil" + +func (m *GetFileSourceInfoResponse_FileSourceState) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&RemoveTracepointResponse{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateConfigRequest) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovService(uint64(l)) } - s := strings.Join([]string{`&UpdateConfigRequest{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `AgentPodName:` + fmt.Sprintf("%v", this.AgentPodName) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateConfigResponse) String() string { - if this == nil { - return "nil" + if m.State != 0 { + n += 1 + sovService(uint64(m.State)) } - s := strings.Join([]string{`&UpdateConfigResponse{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s -} -func (this *GetScriptsRequest) String() string { - if this == nil { - return "nil" + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - s := 
strings.Join([]string{`&GetScriptsRequest{`, - `}`, - }, "") - return s -} -func (this *GetScriptsResponse) String() string { - if this == nil { - return "nil" + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) } - keysForScripts := make([]string, 0, len(this.Scripts)) - for k, _ := range this.Scripts { - keysForScripts = append(keysForScripts, k) + if m.ExpectedState != 0 { + n += 1 + sovService(uint64(m.ExpectedState)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) - mapStringForScripts := "map[string]*cvmsgspb.CronScript{" - for _, k := range keysForScripts { - mapStringForScripts += fmt.Sprintf("%v: %v,", k, this.Scripts[k]) + if len(m.SchemaNames) > 0 { + for _, s := range m.SchemaNames { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } } - mapStringForScripts += "}" - s := strings.Join([]string{`&GetScriptsResponse{`, - `Scripts:` + mapStringForScripts + `,`, - `}`, - }, "") - return s + return n } -func (this *AddOrUpdateScriptRequest) String() string { - if this == nil { - return "nil" + +func (m *RemoveFileSourceRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AddOrUpdateScriptRequest{`, - `Script:` + strings.Replace(fmt.Sprintf("%v", this.Script), "CronScript", "cvmsgspb.CronScript", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AddOrUpdateScriptResponse) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } } - s := strings.Join([]string{`&AddOrUpdateScriptResponse{`, - `}`, - }, "") - return s + return n } -func (this *DeleteScriptRequest) String() string { - if this == nil { - return "nil" + +func (m *RemoveFileSourceResponse) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DeleteScriptRequest{`, - `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, - `}`, - }, "") - return s 
+ var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) + } + return n } -func (this *DeleteScriptResponse) String() string { - if this == nil { - return "nil" + +func (m *RegisterTracepointRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&DeleteScriptResponse{`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + return n } -func (this *SetScriptsRequest) String() string { - if this == nil { - return "nil" + +func (m *RegisterTracepointRequest_TracepointRequest) Size() (n int) { + if m == nil { + return 0 } - keysForScripts := make([]string, 0, len(this.Scripts)) - for k, _ := range this.Scripts { - keysForScripts = append(keysForScripts, k) + var l int + _ = l + if m.TracepointDeployment != nil { + l = m.TracepointDeployment.Size() + n += 1 + l + sovService(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) - mapStringForScripts := "map[string]*cvmsgspb.CronScript{" - for _, k := range keysForScripts { - mapStringForScripts += fmt.Sprintf("%v: %v,", k, this.Scripts[k]) + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) } - mapStringForScripts += "}" - s := strings.Join([]string{`&SetScriptsRequest{`, - `Scripts:` + mapStringForScripts + `,`, - `}`, - }, "") - return s -} -func (this *SetScriptsResponse) String() string { - if this == nil { - return "nil" + if m.TTL != nil { + l = m.TTL.Size() + n += 1 + l + sovService(uint64(l)) } - s := strings.Join([]string{`&SetScriptsResponse{`, - `}`, - }, "") - return s + return n } -func (this *ExecutionStats) String() string { - if this == nil { - return "nil" + +func (m *RegisterTracepointResponse) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ExecutionStats{`, - `ExecutionTimeNs:` + fmt.Sprintf("%v", this.ExecutionTimeNs) + `,`, - `CompilationTimeNs:` + 
fmt.Sprintf("%v", this.CompilationTimeNs) + `,`, - `BytesProcessed:` + fmt.Sprintf("%v", this.BytesProcessed) + `,`, - `RecordsProcessed:` + fmt.Sprintf("%v", this.RecordsProcessed) + `,`, - `}`, - }, "") - return s -} -func (this *RecordExecutionResultRequest) String() string { - if this == nil { - return "nil" + var l int + _ = l + if len(m.Tracepoints) > 0 { + for _, e := range m.Tracepoints { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } } - s := strings.Join([]string{`&RecordExecutionResultRequest{`, - `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, - `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1) + `,`, - `Result:` + fmt.Sprintf("%v", this.Result) + `,`, - `}`, - }, "") - return s -} -func (this *RecordExecutionResultRequest_Error) String() string { - if this == nil { - return "nil" + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) } - s := strings.Join([]string{`&RecordExecutionResultRequest_Error{`, - `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *RecordExecutionResultRequest_ExecutionStats) String() string { - if this == nil { - return "nil" + +func (m *RegisterTracepointResponse_TracepointStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&RecordExecutionResultRequest_ExecutionStats{`, - `ExecutionStats:` + strings.Replace(fmt.Sprintf("%v", this.ExecutionStats), "ExecutionStats", "ExecutionStats", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RecordExecutionResultResponse) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + 
} + return n +} + +func (m *GetTracepointInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.IDs) > 0 { + for _, e := range m.IDs { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + return n +} + +func (m *GetTracepointInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tracepoints) > 0 { + for _, e := range m.Tracepoints { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + return n +} + +func (m *GetTracepointInfoResponse_TracepointState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.State != 0 { + n += 1 + sovService(uint64(m.State)) + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if m.ExpectedState != 0 { + n += 1 + sovService(uint64(m.ExpectedState)) + } + if len(m.SchemaNames) > 0 { + for _, s := range m.SchemaNames { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + return n +} + +func (m *RemoveTracepointRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + return n +} + +func (m *RemoveTracepointResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *UpdateConfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.AgentPodName) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *UpdateConfigResponse) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *GetScriptsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *GetScriptsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Scripts) > 0 { + for k, v := range m.Scripts { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovService(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovService(uint64(len(k))) + l + n += mapEntrySize + 1 + sovService(uint64(mapEntrySize)) + } + } + return n +} + +func (m *AddOrUpdateScriptRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Script != nil { + l = m.Script.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *AddOrUpdateScriptResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *DeleteScriptRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScriptID != nil { + l = m.ScriptID.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *DeleteScriptResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *SetScriptsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Scripts) > 0 { + for k, v := range m.Scripts { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovService(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovService(uint64(len(k))) + l + n += mapEntrySize + 1 + sovService(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SetScriptsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ExecutionStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExecutionTimeNs != 0 { + n += 1 + sovService(uint64(m.ExecutionTimeNs)) + } + if 
m.CompilationTimeNs != 0 { + n += 1 + sovService(uint64(m.CompilationTimeNs)) + } + if m.BytesProcessed != 0 { + n += 1 + sovService(uint64(m.BytesProcessed)) + } + if m.RecordsProcessed != 0 { + n += 1 + sovService(uint64(m.RecordsProcessed)) + } + return n +} + +func (m *RecordExecutionResultRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScriptID != nil { + l = m.ScriptID.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.Result != nil { + n += m.Result.Size() + } + return n +} + +func (m *RecordExecutionResultRequest_Error) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} +func (m *RecordExecutionResultRequest_ExecutionStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExecutionStats != nil { + l = m.ExecutionStats.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} +func (m *RecordExecutionResultResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *GetAllExecutionResultsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *GetAllExecutionResultsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + return n +} + +func (m *GetAllExecutionResultsResponse_ExecutionResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ScriptID != nil { + l = m.ScriptID.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.Result != nil { + n += m.Result.Size() + } + return n +} + +func (m 
*GetAllExecutionResultsResponse_ExecutionResult_Error) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} +func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExecutionStats != nil { + l = m.ExecutionStats.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func sovService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SchemaRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SchemaRequest{`, + `}`, + }, "") + return s +} +func (this *SchemaResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SchemaResponse{`, + `Schema:` + strings.Replace(fmt.Sprintf("%v", this.Schema), "Schema", "schemapb.Schema", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AgentInfoRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentInfoRequest{`, + `}`, + }, "") + return s +} +func (this *AgentInfoResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForInfo := "[]*AgentMetadata{" + for _, f := range this.Info { + repeatedStringForInfo += strings.Replace(f.String(), "AgentMetadata", "AgentMetadata", 1) + "," + } + repeatedStringForInfo += "}" + s := strings.Join([]string{`&AgentInfoResponse{`, + `Info:` + repeatedStringForInfo + `,`, + `}`, + }, "") + return s +} +func (this *AgentMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentMetadata{`, + `Agent:` + strings.Replace(fmt.Sprintf("%v", this.Agent), "Agent", "agentpb.Agent", 1) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "AgentStatus", 
"agentpb.AgentStatus", 1) + `,`, + `CarnotInfo:` + strings.Replace(fmt.Sprintf("%v", this.CarnotInfo), "CarnotInfo", "distributedpb.CarnotInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AgentUpdatesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentUpdatesRequest{`, + `MaxUpdateInterval:` + strings.Replace(fmt.Sprintf("%v", this.MaxUpdateInterval), "Duration", "types.Duration", 1) + `,`, + `MaxUpdatesPerResponse:` + fmt.Sprintf("%v", this.MaxUpdatesPerResponse) + `,`, + `}`, + }, "") + return s +} +func (this *AgentUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentUpdate{`, + `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "UUID", "uuidpb.UUID", 1) + `,`, + `Update:` + fmt.Sprintf("%v", this.Update) + `,`, + `}`, + }, "") + return s +} +func (this *AgentUpdate_Deleted) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentUpdate_Deleted{`, + `Deleted:` + fmt.Sprintf("%v", this.Deleted) + `,`, + `}`, + }, "") + return s +} +func (this *AgentUpdate_Agent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentUpdate_Agent{`, + `Agent:` + strings.Replace(fmt.Sprintf("%v", this.Agent), "Agent", "agentpb.Agent", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AgentUpdate_DataInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentUpdate_DataInfo{`, + `DataInfo:` + strings.Replace(fmt.Sprintf("%v", this.DataInfo), "AgentDataInfo", "messagespb.AgentDataInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AgentUpdatesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForAgentUpdates := "[]*AgentUpdate{" + for _, f := range this.AgentUpdates { + repeatedStringForAgentUpdates += strings.Replace(f.String(), "AgentUpdate", "AgentUpdate", 1) + "," + } + repeatedStringForAgentUpdates += "}" + 
repeatedStringForAgentSchemas := "[]*SchemaInfo{" + for _, f := range this.AgentSchemas { + repeatedStringForAgentSchemas += strings.Replace(fmt.Sprintf("%v", f), "SchemaInfo", "distributedpb.SchemaInfo", 1) + "," + } + repeatedStringForAgentSchemas += "}" + s := strings.Join([]string{`&AgentUpdatesResponse{`, + `AgentUpdates:` + repeatedStringForAgentUpdates + `,`, + `AgentSchemas:` + repeatedStringForAgentSchemas + `,`, + `AgentSchemasUpdated:` + fmt.Sprintf("%v", this.AgentSchemasUpdated) + `,`, + `EndOfVersion:` + fmt.Sprintf("%v", this.EndOfVersion) + `,`, + `}`, + }, "") + return s +} +func (this *WithPrefixKeyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WithPrefixKeyRequest{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `Proto:` + fmt.Sprintf("%v", this.Proto) + `,`, + `}`, + }, "") + return s +} +func (this *WithPrefixKeyResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForKvs := "[]*WithPrefixKeyResponse_KV{" + for _, f := range this.Kvs { + repeatedStringForKvs += strings.Replace(fmt.Sprintf("%v", f), "WithPrefixKeyResponse_KV", "WithPrefixKeyResponse_KV", 1) + "," + } + repeatedStringForKvs += "}" + s := strings.Join([]string{`&WithPrefixKeyResponse{`, + `Kvs:` + repeatedStringForKvs + `,`, + `}`, + }, "") + return s +} +func (this *WithPrefixKeyResponse_KV) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WithPrefixKeyResponse_KV{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *RegisterFileSourceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]*FileSourceDeployment{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(fmt.Sprintf("%v", f), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + "," + } + repeatedStringForRequests += "}" + s := 
strings.Join([]string{`&RegisterFileSourceRequest{`, + `Requests:` + repeatedStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *RegisterFileSourceResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForFileSources := "[]*RegisterFileSourceResponse_FileSourceStatus{" + for _, f := range this.FileSources { + repeatedStringForFileSources += strings.Replace(fmt.Sprintf("%v", f), "RegisterFileSourceResponse_FileSourceStatus", "RegisterFileSourceResponse_FileSourceStatus", 1) + "," + } + repeatedStringForFileSources += "}" + s := strings.Join([]string{`&RegisterFileSourceResponse{`, + `FileSources:` + repeatedStringForFileSources + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RegisterFileSourceResponse_FileSourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegisterFileSourceResponse_FileSourceStatus{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *GetFileSourceInfoRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForIDs := "[]*UUID{" + for _, f := range this.IDs { + repeatedStringForIDs += strings.Replace(fmt.Sprintf("%v", f), "UUID", "uuidpb.UUID", 1) + "," + } + repeatedStringForIDs += "}" + s := strings.Join([]string{`&GetFileSourceInfoRequest{`, + `IDs:` + repeatedStringForIDs + `,`, + `}`, + }, "") + return s +} +func (this *GetFileSourceInfoResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForFileSources := "[]*GetFileSourceInfoResponse_FileSourceState{" + for _, f := range this.FileSources { + repeatedStringForFileSources += strings.Replace(fmt.Sprintf("%v", f), 
"GetFileSourceInfoResponse_FileSourceState", "GetFileSourceInfoResponse_FileSourceState", 1) + "," + } + repeatedStringForFileSources += "}" + s := strings.Join([]string{`&GetFileSourceInfoResponse{`, + `FileSources:` + repeatedStringForFileSources + `,`, + `}`, + }, "") + return s +} +func (this *GetFileSourceInfoResponse_FileSourceState) String() string { + if this == nil { + return "nil" + } + repeatedStringForStatuses := "[]*Status{" + for _, f := range this.Statuses { + repeatedStringForStatuses += strings.Replace(fmt.Sprintf("%v", f), "Status", "statuspb.Status", 1) + "," + } + repeatedStringForStatuses += "}" + s := strings.Join([]string{`&GetFileSourceInfoResponse_FileSourceState{`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Statuses:` + repeatedStringForStatuses + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ExpectedState:` + fmt.Sprintf("%v", this.ExpectedState) + `,`, + `SchemaNames:` + fmt.Sprintf("%v", this.SchemaNames) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveFileSourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveFileSourceRequest{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveFileSourceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveFileSourceResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RegisterTracepointRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]*RegisterTracepointRequest_TracepointRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(fmt.Sprintf("%v", f), "RegisterTracepointRequest_TracepointRequest", "RegisterTracepointRequest_TracepointRequest", 1) + "," + } + 
repeatedStringForRequests += "}" + s := strings.Join([]string{`&RegisterTracepointRequest{`, + `Requests:` + repeatedStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *RegisterTracepointRequest_TracepointRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegisterTracepointRequest_TracepointRequest{`, + `TracepointDeployment:` + strings.Replace(fmt.Sprintf("%v", this.TracepointDeployment), "TracepointDeployment", "logicalpb.TracepointDeployment", 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TTL:` + strings.Replace(fmt.Sprintf("%v", this.TTL), "Duration", "types.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RegisterTracepointResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTracepoints := "[]*RegisterTracepointResponse_TracepointStatus{" + for _, f := range this.Tracepoints { + repeatedStringForTracepoints += strings.Replace(fmt.Sprintf("%v", f), "RegisterTracepointResponse_TracepointStatus", "RegisterTracepointResponse_TracepointStatus", 1) + "," + } + repeatedStringForTracepoints += "}" + s := strings.Join([]string{`&RegisterTracepointResponse{`, + `Tracepoints:` + repeatedStringForTracepoints + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RegisterTracepointResponse_TracepointStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegisterTracepointResponse_TracepointStatus{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *GetTracepointInfoRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForIDs := "[]*UUID{" + for _, f := range this.IDs 
{ + repeatedStringForIDs += strings.Replace(fmt.Sprintf("%v", f), "UUID", "uuidpb.UUID", 1) + "," + } + repeatedStringForIDs += "}" + s := strings.Join([]string{`&GetTracepointInfoRequest{`, + `IDs:` + repeatedStringForIDs + `,`, + `}`, + }, "") + return s +} +func (this *GetTracepointInfoResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForTracepoints := "[]*GetTracepointInfoResponse_TracepointState{" + for _, f := range this.Tracepoints { + repeatedStringForTracepoints += strings.Replace(fmt.Sprintf("%v", f), "GetTracepointInfoResponse_TracepointState", "GetTracepointInfoResponse_TracepointState", 1) + "," + } + repeatedStringForTracepoints += "}" + s := strings.Join([]string{`&GetTracepointInfoResponse{`, + `Tracepoints:` + repeatedStringForTracepoints + `,`, + `}`, + }, "") + return s +} +func (this *GetTracepointInfoResponse_TracepointState) String() string { + if this == nil { + return "nil" + } + repeatedStringForStatuses := "[]*Status{" + for _, f := range this.Statuses { + repeatedStringForStatuses += strings.Replace(fmt.Sprintf("%v", f), "Status", "statuspb.Status", 1) + "," + } + repeatedStringForStatuses += "}" + s := strings.Join([]string{`&GetTracepointInfoResponse_TracepointState{`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Statuses:` + repeatedStringForStatuses + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ExpectedState:` + fmt.Sprintf("%v", this.ExpectedState) + `,`, + `SchemaNames:` + fmt.Sprintf("%v", this.SchemaNames) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTracepointRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTracepointRequest{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTracepointResponse) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&RemoveTracepointResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigRequest{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `AgentPodName:` + fmt.Sprintf("%v", this.AgentPodName) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetScriptsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetScriptsRequest{`, + `}`, + }, "") + return s +} +func (this *GetScriptsResponse) String() string { + if this == nil { + return "nil" + } + keysForScripts := make([]string, 0, len(this.Scripts)) + for k, _ := range this.Scripts { + keysForScripts = append(keysForScripts, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) + mapStringForScripts := "map[string]*cvmsgspb.CronScript{" + for _, k := range keysForScripts { + mapStringForScripts += fmt.Sprintf("%v: %v,", k, this.Scripts[k]) + } + mapStringForScripts += "}" + s := strings.Join([]string{`&GetScriptsResponse{`, + `Scripts:` + mapStringForScripts + `,`, + `}`, + }, "") + return s +} +func (this *AddOrUpdateScriptRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AddOrUpdateScriptRequest{`, + `Script:` + strings.Replace(fmt.Sprintf("%v", this.Script), "CronScript", "cvmsgspb.CronScript", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AddOrUpdateScriptResponse) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&AddOrUpdateScriptResponse{`, + `}`, + }, "") + return s +} +func (this *DeleteScriptRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteScriptRequest{`, + `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteScriptResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteScriptResponse{`, + `}`, + }, "") + return s +} +func (this *SetScriptsRequest) String() string { + if this == nil { + return "nil" + } + keysForScripts := make([]string, 0, len(this.Scripts)) + for k, _ := range this.Scripts { + keysForScripts = append(keysForScripts, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForScripts) + mapStringForScripts := "map[string]*cvmsgspb.CronScript{" + for _, k := range keysForScripts { + mapStringForScripts += fmt.Sprintf("%v: %v,", k, this.Scripts[k]) + } + mapStringForScripts += "}" + s := strings.Join([]string{`&SetScriptsRequest{`, + `Scripts:` + mapStringForScripts + `,`, + `}`, + }, "") + return s +} +func (this *SetScriptsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SetScriptsResponse{`, + `}`, + }, "") + return s +} +func (this *ExecutionStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecutionStats{`, + `ExecutionTimeNs:` + fmt.Sprintf("%v", this.ExecutionTimeNs) + `,`, + `CompilationTimeNs:` + fmt.Sprintf("%v", this.CompilationTimeNs) + `,`, + `BytesProcessed:` + fmt.Sprintf("%v", this.BytesProcessed) + `,`, + `RecordsProcessed:` + fmt.Sprintf("%v", this.RecordsProcessed) + `,`, + `}`, + }, "") + return s +} +func (this *RecordExecutionResultRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecordExecutionResultRequest{`, + `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", 
"uuidpb.UUID", 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *RecordExecutionResultRequest_Error) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecordExecutionResultRequest_Error{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RecordExecutionResultRequest_ExecutionStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecordExecutionResultRequest_ExecutionStats{`, + `ExecutionStats:` + strings.Replace(fmt.Sprintf("%v", this.ExecutionStats), "ExecutionStats", "ExecutionStats", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RecordExecutionResultResponse) String() string { + if this == nil { + return "nil" } s := strings.Join([]string{`&RecordExecutionResultResponse{`, `}`, }, "") return s } -func (this *GetAllExecutionResultsRequest) String() string { - if this == nil { - return "nil" +func (this *GetAllExecutionResultsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsRequest{`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]*GetAllExecutionResultsResponse_ExecutionResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(fmt.Sprintf("%v", f), "GetAllExecutionResultsResponse_ExecutionResult", "GetAllExecutionResultsResponse_ExecutionResult", 1) + "," + } + repeatedStringForResults += "}" + s := strings.Join([]string{`&GetAllExecutionResultsResponse{`, + `Results:` + repeatedStringForResults + `,`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult{`, + `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_Error{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats{`, + `ExecutionStats:` + strings.Replace(fmt.Sprintf("%v", this.ExecutionStats), "ExecutionStats", "ExecutionStats", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringService(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &schemapb.Schema{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AgentInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&GetAllExecutionResultsRequest{`, - `}`, - }, "") - return s + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (this *GetAllExecutionResultsResponse) String() string { - if this == nil { - return "nil" +func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = append(m.Info, &AgentMetadata{}) + if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - repeatedStringForResults := "[]*GetAllExecutionResultsResponse_ExecutionResult{" - for _, f := range this.Results { - repeatedStringForResults += strings.Replace(fmt.Sprintf("%v", f), "GetAllExecutionResultsResponse_ExecutionResult", "GetAllExecutionResultsResponse_ExecutionResult", 1) + "," + + if iNdEx > l { + return io.ErrUnexpectedEOF } - repeatedStringForResults += "}" - s := strings.Join([]string{`&GetAllExecutionResultsResponse{`, - `Results:` + repeatedStringForResults + `,`, - `}`, - }, "") - return s + return nil } -func (this *GetAllExecutionResultsResponse_ExecutionResult) String() string { - if this == nil { - return "nil" +func (m *AgentMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Agent == nil { + m.Agent = &agentpb.Agent{} + } + if err := m.Agent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &agentpb.AgentStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CarnotInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CarnotInfo == nil { + m.CarnotInfo = &distributedpb.CarnotInfo{} + } + if err := m.CarnotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult{`, - `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, - `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1) + `,`, - `Result:` + fmt.Sprintf("%v", this.Result) + `,`, - `}`, - }, "") - return s -} -func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) String() string { - if this == nil { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_Error{`, - `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s + return nil } -func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) String() string { - if this == nil { - return "nil" +func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentUpdatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentUpdatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdateInterval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUpdateInterval == nil { + m.MaxUpdateInterval = &types.Duration{} + } + if err := m.MaxUpdateInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdatesPerResponse", wireType) + } + m.MaxUpdatesPerResponse = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxUpdatesPerResponse |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats{`, - 
`ExecutionStats:` + strings.Replace(fmt.Sprintf("%v", this.ExecutionStats), "ExecutionStats", "ExecutionStats", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringService(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *SchemaRequest) Unmarshal(dAtA []byte) error { +func (m *AgentUpdate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7260,25 +9345,152 @@ func (m *SchemaRequest) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowService } - if iNdEx >= l { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &uuidpb.UUID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + } + var v int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Update = &AgentUpdate_Deleted{b} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &agentpb.Agent{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.Update = &AgentUpdate_Agent{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &messagespb.AgentDataInfo{} + if err 
:= v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Update = &AgentUpdate_DataInfo{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -7300,7 +9512,7 @@ func (m *SchemaRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *SchemaResponse) Unmarshal(dAtA []byte) error { +func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7323,15 +9535,15 @@ func (m *SchemaResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: AgentUpdatesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentUpdatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentUpdates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7358,13 +9570,85 @@ func (m *SchemaResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Schema == nil { - m.Schema = &schemapb.Schema{} + m.AgentUpdates = append(m.AgentUpdates, &AgentUpdate{}) + if err := m.AgentUpdates[len(m.AgentUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AgentSchemas = append(m.AgentSchemas, &distributedpb.SchemaInfo{}) + if err := m.AgentSchemas[len(m.AgentSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemasUpdated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AgentSchemasUpdated = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndOfVersion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EndOfVersion = bool(v != 0) default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -7386,7 +9670,7 @@ func (m *SchemaResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentInfoRequest) Unmarshal(dAtA []byte) error { +func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7396,25 +9680,89 @@ func (m *AgentInfoRequest) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowService } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: WithPrefixKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WithPrefixKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.Proto = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex 
default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -7436,7 +9784,7 @@ func (m *AgentInfoRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { +func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7459,15 +9807,15 @@ func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WithPrefixKeyResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WithPrefixKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7494,8 +9842,8 @@ func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Info = append(m.Info, &AgentMetadata{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Kvs = append(m.Kvs, &WithPrefixKeyResponse_KV{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7520,7 +9868,7 @@ func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentMetadata) Unmarshal(dAtA []byte) error { +func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7543,17 +9891,17 @@ func (m *AgentMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentMetadata: 
wiretype end group for non-group") + return fmt.Errorf("proto: KV: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -7563,33 +9911,29 @@ func (m *AgentMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Agent == nil { - m.Agent = &agentpb.Agent{} - } - if err := m.Agent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -7599,31 +9943,79 @@ func (m *AgentMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return 
io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &agentpb.AgentStatus{} + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 3: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegisterFileSourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegisterFileSourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegisterFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CarnotInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7650,10 +10042,8 @@ func (m *AgentMetadata) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CarnotInfo == nil { - m.CarnotInfo = &distributedpb.CarnotInfo{} - } - if err := m.CarnotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Requests = append(m.Requests, &ir.FileSourceDeployment{}) + if err := 
m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7678,7 +10068,7 @@ func (m *AgentMetadata) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { +func (m *RegisterFileSourceResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7701,15 +10091,15 @@ func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentUpdatesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RegisterFileSourceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentUpdatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RegisterFileSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdateInterval", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FileSources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7736,18 +10126,16 @@ func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUpdateInterval == nil { - m.MaxUpdateInterval = &types.Duration{} - } - if err := m.MaxUpdateInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.FileSources = append(m.FileSources, &RegisterFileSourceResponse_FileSourceStatus{}) + if err := m.FileSources[len(m.FileSources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdatesPerResponse", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.MaxUpdatesPerResponse = 0 + var msglen int for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -7757,11 +10145,28 @@ func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxUpdatesPerResponse |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &statuspb.Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -7783,7 +10188,7 @@ func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentUpdate) Unmarshal(dAtA []byte) error { +func (m *RegisterFileSourceResponse_FileSourceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7806,15 +10211,15 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentUpdate: wiretype end group for non-group") + return fmt.Errorf("proto: FileSourceStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileSourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7841,18 +10246,18 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AgentID == nil { - m.AgentID = &uuidpb.UUID{} + if m.Status == nil { + m.Status = 
&statuspb.Status{} } - if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -7862,18 +10267,33 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Update = &AgentUpdate_Deleted{b} + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -7883,30 +10303,77 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - v := &agentpb.Agent{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Name = string(dAtA[iNdEx:postIndex]) 
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { return err } - m.Update = &AgentUpdate_Agent{v} - iNdEx = postIndex - case 4: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetFileSourceInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetFileSourceInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetFileSourceInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IDs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7933,11 +10400,10 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &messagespb.AgentDataInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.IDs = append(m.IDs, &uuidpb.UUID{}) + if err := m.IDs[len(m.IDs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Update = &AgentUpdate_DataInfo{v} iNdEx = postIndex default: iNdEx = preIndex @@ -7960,7 +10426,7 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { +func (m *GetFileSourceInfoResponse) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7983,15 +10449,15 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentUpdatesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetFileSourceInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentUpdatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFileSourceInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentUpdates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FileSources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8018,14 +10484,64 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AgentUpdates = append(m.AgentUpdates, &AgentUpdate{}) - if err := m.AgentUpdates[len(m.AgentUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.FileSources = append(m.FileSources, &GetFileSourceInfoResponse_FileSourceState{}) + if err := m.FileSources[len(m.FileSources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileSourceState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileSourceState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8052,16 +10568,18 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AgentSchemas = append(m.AgentSchemas, &distributedpb.SchemaInfo{}) - if err := m.AgentSchemas[len(m.AgentSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemasUpdated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var v int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -8071,17 +10589,16 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.State |= statuspb.LifeCycleState(b&0x7F) << shift if b < 0x80 { break } } - m.AgentSchemasUpdated = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndOfVersion", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) } - var v int + var msglen int for shift 
:= uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -8091,65 +10608,29 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.EndOfVersion = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthService } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Statuses = append(m.Statuses, &statuspb.Status{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WithPrefixKeyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WithPrefixKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8177,11 +10658,30 @@ func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedState", wireType) + } + m.ExpectedState = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedState |= statuspb.LifeCycleState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchemaNames", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8209,7 +10709,7 @@ func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Proto = string(dAtA[iNdEx:postIndex]) + m.SchemaNames = append(m.SchemaNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -8232,7 +10732,7 @@ func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { +func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8255,17 +10755,17 @@ func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WithPrefixKeyResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveFileSourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WithPrefixKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Kvs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -8275,25 +10775,23 @@ func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kvs = append(m.Kvs, &WithPrefixKeyResponse_KV{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -8316,7 +10814,7 @@ func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { +func (m *RemoveFileSourceResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8339,17 +10837,17 @@ func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KV: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveFileSourceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveFileSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + 
var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -8359,56 +10857,26 @@ func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Status == nil { + m.Status = &statuspb.Status{} } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: diff --git a/src/vizier/services/metadata/metadatapb/service.proto b/src/vizier/services/metadata/metadatapb/service.proto index 7a184c73d15..1b5dd699660 100644 --- a/src/vizier/services/metadata/metadatapb/service.proto +++ b/src/vizier/services/metadata/metadatapb/service.proto @@ -28,6 +28,7 @@ import "google/protobuf/timestamp.proto"; import "src/api/proto/uuidpb/uuid.proto"; import "src/carnot/planner/distributedpb/distributed_plan.proto"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; +import "src/carnot/planner/file_source/ir/logical.proto"; import "src/common/base/statuspb/status.proto"; import "src/table_store/schemapb/schema.proto"; import "src/vizier/messages/messagespb/messages.proto"; @@ -45,6 +46,12 @@ service MetadataService { rpc GetWithPrefixKey(WithPrefixKeyRequest) returns (WithPrefixKeyResponse); } +service MetadataFileSourceService { + rpc RegisterFileSource(RegisterFileSourceRequest) returns (RegisterFileSourceResponse); + rpc GetFileSourceInfo(GetFileSourceInfoRequest) returns (GetFileSourceInfoResponse); + rpc RemoveFileSource(RemoveFileSourceRequest) returns (RemoveFileSourceResponse); +} + service MetadataTracepointService { rpc RegisterTracepoint(RegisterTracepointRequest) returns (RegisterTracepointResponse); rpc GetTracepointInfo(GetTracepointInfoRequest) returns (GetTracepointInfoResponse); @@ -162,6 +169,63 @@ message WithPrefixKeyResponse { repeated KV kvs = 1; } +message RegisterFileSourceRequest { + repeated px.carnot.planner.file_source.ir.FileSourceDeployment requests = 1; +} + +// The response to a RegisterFileSourceRequest. +message RegisterFileSourceResponse { + message FileSourceStatus { + px.statuspb.Status status = 1; // TODO(ddelnano): Is this necessary? + // The ID of the file source. This should be the user-specified name for the file source . 
+ uuidpb.UUID id = 2 [ (gogoproto.customname) = "ID" ]; + string name = 3; + } + repeated FileSourceStatus file_sources = 1; + // Overall status of whether file source registration requests were initiated with/without + // errors. + px.statuspb.Status status = 2; +} + +// The request to check the status for a file source with the given names. +message GetFileSourceInfoRequest { + // The file source IDs to get the info for. If empty, fetches the info for all known file source + // s. + repeated uuidpb.UUID ids = 1 [ (gogoproto.customname) = "IDs" ]; +} + +// The status of whether the file source has successfully registered or not. +message GetFileSourceInfoResponse { + message FileSourceState { + // The file source ID. + uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; + // The state of the file source . + px.statuspb.LifeCycleState state = 2; + // The status of the file source, specified if the state of the file source is not healthy. + repeated px.statuspb.Status statuses = 3; + string name = 4; + // The desired state for the file source . This can be used to determine whether + // the file source is just starting up or in the process of terminating. + px.statuspb.LifeCycleState expected_state = 5; + repeated string schema_names = 6; + } + // List of file source states. + repeated FileSourceState file_sources = 1; +} + +// The request to evict a file source . This will normally happen via the file source 's TTL, but +// can be initiated via request as well. +message RemoveFileSourceRequest { + // The name of the file source to remove. + repeated string names = 1; +} + +// The response to the file source removal. +message RemoveFileSourceResponse { + // Status of whether the file source removal request was initiated with/without errors. + px.statuspb.Status status = 1; +} + // The request to register tracepoints on all PEMs. 
message RegisterTracepointRequest { message TracepointRequest { diff --git a/src/vizier/services/metadata/storepb/BUILD.bazel b/src/vizier/services/metadata/storepb/BUILD.bazel index c2a677a2c8f..f0a1ba5db8d 100644 --- a/src/vizier/services/metadata/storepb/BUILD.bazel +++ b/src/vizier/services/metadata/storepb/BUILD.bazel @@ -23,6 +23,7 @@ pl_proto_library( deps = [ "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", + "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/k8s/metadatapb:metadata_pl_proto", "//src/shared/types/typespb:types_pl_proto", @@ -37,6 +38,7 @@ pl_cc_proto_library( deps = [ "//src/api/proto/uuidpb:uuid_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", + "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/k8s/metadatapb:metadata_pl_cc_proto", "//src/shared/types/typespb/wrapper:cc_library", @@ -52,6 +54,7 @@ pl_go_proto_library( deps = [ "//src/api/proto/uuidpb:uuid_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/k8s/metadatapb:metadata_pl_go_proto", "//src/shared/types/typespb:types_pl_go_proto", diff --git a/src/vizier/services/metadata/storepb/store.pb.go b/src/vizier/services/metadata/storepb/store.pb.go index 17bd3150f69..374e966bab0 100755 --- a/src/vizier/services/metadata/storepb/store.pb.go +++ b/src/vizier/services/metadata/storepb/store.pb.go @@ -14,6 +14,7 @@ import ( math_bits "math/bits" uuidpb "px.dev/pixie/src/api/proto/uuidpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" + ir "px.dev/pixie/src/carnot/planner/file_source/ir" statuspb "px.dev/pixie/src/common/base/statuspb" metadatapb 
"px.dev/pixie/src/shared/k8s/metadatapb" typespb "px.dev/pixie/src/shared/types/typespb" @@ -99,6 +100,73 @@ func (m *TracepointInfo) GetExpectedState() statuspb.LifeCycleState { return statuspb.UNKNOWN_STATE } +type FileSourceInfo struct { + ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + FileSource *ir.FileSourceDeployment `protobuf:"bytes,2,opt,name=file_source,json=fileSource,proto3" json:"file_source,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + ExpectedState statuspb.LifeCycleState `protobuf:"varint,4,opt,name=expected_state,json=expectedState,proto3,enum=px.statuspb.LifeCycleState" json:"expected_state,omitempty"` +} + +func (m *FileSourceInfo) Reset() { *m = FileSourceInfo{} } +func (*FileSourceInfo) ProtoMessage() {} +func (*FileSourceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_27ea71ea705227d1, []int{1} +} +func (m *FileSourceInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileSourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FileSourceInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FileSourceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileSourceInfo.Merge(m, src) +} +func (m *FileSourceInfo) XXX_Size() int { + return m.Size() +} +func (m *FileSourceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_FileSourceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_FileSourceInfo proto.InternalMessageInfo + +func (m *FileSourceInfo) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil +} + +func (m *FileSourceInfo) GetFileSource() *ir.FileSourceDeployment { + if m != nil { + return m.FileSource + } + return nil +} + +func (m *FileSourceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m 
*FileSourceInfo) GetExpectedState() statuspb.LifeCycleState { + if m != nil { + return m.ExpectedState + } + return statuspb.UNKNOWN_STATE +} + type AgentTracepointStatus struct { State statuspb.LifeCycleState `protobuf:"varint,1,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` @@ -109,7 +177,7 @@ type AgentTracepointStatus struct { func (m *AgentTracepointStatus) Reset() { *m = AgentTracepointStatus{} } func (*AgentTracepointStatus) ProtoMessage() {} func (*AgentTracepointStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{1} + return fileDescriptor_27ea71ea705227d1, []int{2} } func (m *AgentTracepointStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -166,6 +234,73 @@ func (m *AgentTracepointStatus) GetAgentID() *uuidpb.UUID { return nil } +type AgentFileSourceStatus struct { + State statuspb.LifeCycleState `protobuf:"varint,1,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` + Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + ID *uuidpb.UUID `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` + AgentID *uuidpb.UUID `protobuf:"bytes,4,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` +} + +func (m *AgentFileSourceStatus) Reset() { *m = AgentFileSourceStatus{} } +func (*AgentFileSourceStatus) ProtoMessage() {} +func (*AgentFileSourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_27ea71ea705227d1, []int{3} +} +func (m *AgentFileSourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AgentFileSourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AgentFileSourceStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err 
+ } + return b[:n], nil + } +} +func (m *AgentFileSourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AgentFileSourceStatus.Merge(m, src) +} +func (m *AgentFileSourceStatus) XXX_Size() int { + return m.Size() +} +func (m *AgentFileSourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AgentFileSourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AgentFileSourceStatus proto.InternalMessageInfo + +func (m *AgentFileSourceStatus) GetState() statuspb.LifeCycleState { + if m != nil { + return m.State + } + return statuspb.UNKNOWN_STATE +} + +func (m *AgentFileSourceStatus) GetStatus() *statuspb.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *AgentFileSourceStatus) GetID() *uuidpb.UUID { + if m != nil { + return m.ID + } + return nil +} + +func (m *AgentFileSourceStatus) GetAgentID() *uuidpb.UUID { + if m != nil { + return m.AgentID + } + return nil +} + type TableInfo struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Desc string `protobuf:"bytes,7,opt,name=desc,proto3" json:"desc,omitempty"` @@ -174,12 +309,13 @@ type TableInfo struct { Columns []*TableInfo_ColumnInfo `protobuf:"bytes,4,rep,name=columns,proto3" json:"columns,omitempty"` Tabletized bool `protobuf:"varint,5,opt,name=tabletized,proto3" json:"tabletized,omitempty"` TabletizationKey string `protobuf:"bytes,6,opt,name=tabletization_key,json=tabletizationKey,proto3" json:"tabletization_key,omitempty"` + MutationId string `protobuf:"bytes,8,opt,name=mutation_id,json=mutationId,proto3" json:"mutation_id,omitempty"` } func (m *TableInfo) Reset() { *m = TableInfo{} } func (*TableInfo) ProtoMessage() {} func (*TableInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{2} + return fileDescriptor_27ea71ea705227d1, []int{4} } func (m *TableInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -257,6 +393,13 @@ func (m *TableInfo) GetTabletizationKey() string { return "" } +func (m *TableInfo) 
GetMutationId() string { + if m != nil { + return m.MutationId + } + return "" +} + type TableInfo_ColumnInfo struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` DataType typespb.DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=px.types.DataType" json:"data_type,omitempty"` @@ -268,7 +411,7 @@ type TableInfo_ColumnInfo struct { func (m *TableInfo_ColumnInfo) Reset() { *m = TableInfo_ColumnInfo{} } func (*TableInfo_ColumnInfo) ProtoMessage() {} func (*TableInfo_ColumnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{2, 0} + return fileDescriptor_27ea71ea705227d1, []int{4, 0} } func (m *TableInfo_ColumnInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -340,7 +483,7 @@ type ComputedSchema struct { func (m *ComputedSchema) Reset() { *m = ComputedSchema{} } func (*ComputedSchema) ProtoMessage() {} func (*ComputedSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{3} + return fileDescriptor_27ea71ea705227d1, []int{5} } func (m *ComputedSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -390,7 +533,7 @@ type ComputedSchema_AgentIDs struct { func (m *ComputedSchema_AgentIDs) Reset() { *m = ComputedSchema_AgentIDs{} } func (*ComputedSchema_AgentIDs) ProtoMessage() {} func (*ComputedSchema_AgentIDs) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{3, 0} + return fileDescriptor_27ea71ea705227d1, []int{5, 0} } func (m *ComputedSchema_AgentIDs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +585,7 @@ type K8SResource struct { func (m *K8SResource) Reset() { *m = K8SResource{} } func (*K8SResource) ProtoMessage() {} func (*K8SResource) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{4} + return fileDescriptor_27ea71ea705227d1, []int{6} } func (m *K8SResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -596,7 +739,7 @@ type 
K8SResourceUpdate struct { func (m *K8SResourceUpdate) Reset() { *m = K8SResourceUpdate{} } func (*K8SResourceUpdate) ProtoMessage() {} func (*K8SResourceUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{5} + return fileDescriptor_27ea71ea705227d1, []int{7} } func (m *K8SResourceUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -645,7 +788,7 @@ type CronScriptResult struct { func (m *CronScriptResult) Reset() { *m = CronScriptResult{} } func (*CronScriptResult) ProtoMessage() {} func (*CronScriptResult) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{6} + return fileDescriptor_27ea71ea705227d1, []int{8} } func (m *CronScriptResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -725,7 +868,9 @@ func (m *CronScriptResult) GetRecordsProcessed() int64 { func init() { proto.RegisterType((*TracepointInfo)(nil), "px.vizier.services.metadata.TracepointInfo") + proto.RegisterType((*FileSourceInfo)(nil), "px.vizier.services.metadata.FileSourceInfo") proto.RegisterType((*AgentTracepointStatus)(nil), "px.vizier.services.metadata.AgentTracepointStatus") + proto.RegisterType((*AgentFileSourceStatus)(nil), "px.vizier.services.metadata.AgentFileSourceStatus") proto.RegisterType((*TableInfo)(nil), "px.vizier.services.metadata.TableInfo") proto.RegisterType((*TableInfo_ColumnInfo)(nil), "px.vizier.services.metadata.TableInfo.ColumnInfo") proto.RegisterType((*ComputedSchema)(nil), "px.vizier.services.metadata.ComputedSchema") @@ -741,87 +886,92 @@ func init() { } var fileDescriptor_27ea71ea705227d1 = []byte{ - // 1273 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xc6, 0x4e, 0x62, 0x3f, 0xb7, 0xf9, 0x33, 0x69, 0xa9, 0x95, 0x8a, 0x75, 0x30, 0x85, - 0xba, 0x54, 0xda, 0xa5, 0xa1, 0x12, 0x51, 0x51, 0xf9, 0x63, 0xbb, 0x10, 0x53, 0x14, 0x55, 0xeb, - 0xe4, 0xc2, 0x65, 0x35, 0xde, 0x9d, 0xba, 
0xab, 0x7a, 0x77, 0x46, 0x33, 0xe3, 0x2a, 0xae, 0x38, - 0xf0, 0x11, 0x90, 0xf8, 0x10, 0xc0, 0x91, 0x6f, 0xc1, 0xb1, 0xc7, 0x22, 0xa1, 0x88, 0x6e, 0x85, - 0xc4, 0xb1, 0x17, 0xee, 0x68, 0x66, 0x76, 0xd7, 0x36, 0x6d, 0xd2, 0x72, 0xb1, 0xdf, 0xcc, 0xfc, - 0x7e, 0xbf, 0x79, 0xef, 0xcd, 0x9b, 0x37, 0x0b, 0x1f, 0x0a, 0x1e, 0xb8, 0x8f, 0xa2, 0xc7, 0x11, - 0xe1, 0xae, 0x20, 0xfc, 0x51, 0x14, 0x10, 0xe1, 0xc6, 0x44, 0xe2, 0x10, 0x4b, 0xec, 0x0a, 0x49, - 0x39, 0x61, 0x43, 0xf3, 0xef, 0x30, 0x4e, 0x25, 0x45, 0x97, 0xd9, 0xb1, 0x63, 0x08, 0x4e, 0x4e, - 0x70, 0x72, 0xc2, 0xf6, 0x85, 0x11, 0x1d, 0x51, 0x8d, 0x73, 0x95, 0x65, 0x28, 0xdb, 0xcd, 0x11, - 0xa5, 0xa3, 0x31, 0x71, 0xf5, 0x68, 0x38, 0xb9, 0xef, 0xca, 0x28, 0x26, 0x42, 0xe2, 0x98, 0xe5, - 0x00, 0xe5, 0x05, 0x66, 0x91, 0x41, 0xb8, 0x93, 0x49, 0x14, 0xb2, 0xa1, 0xfe, 0xcb, 0x00, 0xb7, - 0x15, 0x20, 0xc0, 0x3c, 0xa1, 0xd2, 0x65, 0x63, 0x9c, 0x24, 0x84, 0xbb, 0xe1, 0x34, 0xc1, 0x71, - 0x14, 0xf8, 0x92, 0xe3, 0x20, 0x4a, 0x46, 0x6e, 0xc4, 0xdd, 0x31, 0x1d, 0x45, 0x01, 0x1e, 0xb3, - 0x61, 0x6e, 0x65, 0xf4, 0xf7, 0x34, 0x9d, 0xc6, 0x31, 0x4d, 0xdc, 0x21, 0x16, 0xc4, 0x15, 0x12, - 0xcb, 0x89, 0xd0, 0x91, 0x29, 0x23, 0x83, 0xb5, 0x15, 0x4c, 0x3c, 0xc0, 0x9c, 0x84, 0xee, 0xc3, - 0xbd, 0x59, 0x1e, 0xd8, 0xb0, 0x30, 0x33, 0xe4, 0x95, 0x39, 0xa4, 0x9c, 0x32, 0x22, 0xcc, 0x2f, - 0x1b, 0x9a, 0x7f, 0x83, 0x6a, 0xfd, 0x63, 0xc1, 0xda, 0x21, 0xc7, 0x01, 0x61, 0x34, 0x4a, 0x64, - 0x3f, 0xb9, 0x4f, 0xd1, 0x55, 0x58, 0x8a, 0xc2, 0x86, 0xb5, 0x63, 0xb5, 0xeb, 0xbb, 0xeb, 0x0e, - 0x3b, 0x76, 0x4c, 0xac, 0xce, 0xd1, 0x51, 0xbf, 0xd7, 0x59, 0x49, 0x4f, 0x9a, 0x4b, 0xfd, 0x9e, - 0xb7, 0x14, 0x85, 0x68, 0x08, 0x20, 0x0b, 0x6a, 0x63, 0x49, 0x13, 0x3a, 0x8a, 0x60, 0xb2, 0xe0, - 0x64, 0x59, 0x70, 0xfe, 0x93, 0x05, 0x27, 0xe2, 0x4e, 0x1e, 0xfb, 0x6c, 0xeb, 0x1e, 0x61, 0x63, - 0x3a, 0x8d, 0x49, 0x22, 0xbd, 0x39, 0x55, 0x84, 0xa0, 0x92, 0xe0, 0x98, 0x34, 0xca, 0x3b, 0x56, - 0xbb, 0xe6, 0x69, 0x1b, 0x75, 0x60, 0x8d, 0x1c, 0x33, 0x12, 0x48, 0x12, 0xfa, 
0x2a, 0x39, 0xa4, - 0x51, 0xd9, 0xb1, 0xda, 0x6b, 0xbb, 0x97, 0xd5, 0xde, 0x79, 0xda, 0x9c, 0x6f, 0xa2, 0xfb, 0xa4, - 0x3b, 0x0d, 0xc6, 0x64, 0xa0, 0x20, 0xde, 0xf9, 0x9c, 0xa2, 0x87, 0xad, 0xdf, 0x2d, 0xb8, 0xf8, - 0xc5, 0x88, 0x24, 0x72, 0xe6, 0xc1, 0x40, 0x33, 0xd1, 0x0d, 0x58, 0x36, 0xa2, 0xd6, 0xeb, 0x45, - 0x0d, 0x12, 0x5d, 0x87, 0x15, 0x83, 0xc8, 0x92, 0xb0, 0xb5, 0xc0, 0x31, 0xba, 0x5e, 0x06, 0xc9, - 0xd2, 0x5b, 0x7e, 0x7d, 0x7a, 0x3f, 0x86, 0x2a, 0x56, 0x1e, 0xfa, 0x51, 0xa8, 0x03, 0x7c, 0x05, - 0xbc, 0x9e, 0x9e, 0x34, 0x57, 0x75, 0x18, 0xfd, 0x9e, 0xb7, 0xaa, 0xd1, 0xfd, 0xb0, 0xf5, 0x6b, - 0x05, 0x6a, 0x87, 0x78, 0x38, 0x26, 0xfa, 0x38, 0xf3, 0x0c, 0x5a, 0x73, 0x19, 0x44, 0x50, 0x09, - 0x89, 0x08, 0x1a, 0xab, 0x66, 0x4e, 0xd9, 0xa8, 0x03, 0x48, 0x48, 0xcc, 0xa5, 0x5f, 0x54, 0xbe, - 0x9f, 0x98, 0x80, 0xca, 0x9d, 0x0b, 0xe9, 0x49, 0x73, 0x63, 0xa0, 0x56, 0x0f, 0xf3, 0xc5, 0x83, - 0x81, 0xb7, 0x21, 0x16, 0x67, 0x04, 0xfa, 0x0c, 0x36, 0x85, 0xa4, 0x6c, 0x51, 0xa2, 0xac, 0x25, - 0xb6, 0xd2, 0x93, 0xe6, 0xfa, 0x40, 0x52, 0x36, 0xaf, 0xb0, 0x2e, 0x16, 0x26, 0x04, 0xba, 0x0b, - 0xab, 0x01, 0x1d, 0x4f, 0xe2, 0x44, 0x34, 0x2a, 0x3b, 0xe5, 0x76, 0x7d, 0xf7, 0x86, 0x73, 0xc6, - 0x5d, 0x76, 0x8a, 0x28, 0x9d, 0xae, 0x66, 0x29, 0xd3, 0xcb, 0x15, 0x90, 0x0d, 0x20, 0x15, 0x40, - 0x46, 0x8f, 0x49, 0xd8, 0x58, 0xde, 0xb1, 0xda, 0x55, 0x6f, 0x6e, 0x06, 0x5d, 0x87, 0xcd, 0x7c, - 0x84, 0x65, 0x44, 0x13, 0xff, 0x21, 0x99, 0x36, 0x56, 0x74, 0x4a, 0x36, 0x16, 0x16, 0xee, 0x92, - 0xe9, 0xf6, 0x1f, 0x16, 0xc0, 0x6c, 0x93, 0x57, 0x66, 0xd5, 0x85, 0x9a, 0xf2, 0xca, 0x57, 0xf7, - 0x4b, 0x27, 0x6e, 0x6d, 0x17, 0x29, 0xf7, 0xcd, 0x7d, 0xeb, 0x61, 0x89, 0x0f, 0xa7, 0x8c, 0x78, - 0xd5, 0x30, 0xb3, 0xd0, 0x1e, 0x9c, 0x63, 0x58, 0x4a, 0xc2, 0x13, 0xc3, 0x29, 0x6b, 0xce, 0xc5, - 0x19, 0xe7, 0x9e, 0x59, 0xd5, 0xb4, 0x3a, 0x9b, 0x0d, 0x8a, 0x03, 0xac, 0xcc, 0x1d, 0xe0, 0x27, - 0x70, 0x5e, 0x90, 0x18, 0x27, 0x52, 0x5d, 0x35, 0x25, 0xb7, 0xac, 0xe5, 0xde, 0x9a, 0xc9, 0x0d, - 0xb2, 0x65, 0xad, 
0x77, 0x4e, 0xcc, 0x8d, 0x5a, 0xbf, 0x94, 0x61, 0xad, 0x4b, 0x63, 0x36, 0x51, - 0x37, 0x24, 0x78, 0x40, 0x62, 0x8c, 0x3e, 0x85, 0x15, 0x9d, 0x05, 0xd1, 0xb0, 0xf4, 0x51, 0xbc, - 0xff, 0x66, 0x47, 0xe1, 0x65, 0x2c, 0xf4, 0xa3, 0x05, 0x97, 0xb4, 0xe9, 0xab, 0xec, 0xf8, 0x92, - 0xfa, 0x79, 0x39, 0xab, 0xb2, 0x52, 0x8a, 0xbd, 0x33, 0x15, 0x17, 0xdd, 0x31, 0x1b, 0x1c, 0xe0, - 0x98, 0x1c, 0x52, 0x53, 0xf1, 0xa1, 0xb8, 0x93, 0x48, 0x3e, 0xed, 0x5c, 0x4a, 0x4f, 0x9a, 0x5b, - 0x2f, 0xad, 0xf6, 0x84, 0xb7, 0x25, 0x5f, 0xa6, 0x6c, 0x77, 0xa1, 0x9a, 0x03, 0x16, 0x6e, 0x98, - 0x89, 0xf1, 0xcd, 0x6e, 0xd8, 0xf6, 0x77, 0xd0, 0x38, 0xcd, 0x1d, 0xb4, 0x01, 0x65, 0x55, 0x47, - 0xa6, 0x30, 0x94, 0x89, 0xbe, 0x86, 0xe5, 0x47, 0x78, 0x3c, 0x21, 0x59, 0x77, 0xb8, 0xf9, 0x7f, - 0xa2, 0x2e, 0x82, 0x31, 0x12, 0xb7, 0x96, 0xf6, 0xac, 0xd6, 0x4f, 0x15, 0xa8, 0xdf, 0xdd, 0x13, - 0x1e, 0x11, 0x74, 0xc2, 0x03, 0x82, 0x6e, 0x40, 0x99, 0xd1, 0xbc, 0x63, 0xbf, 0xad, 0x7b, 0x8f, - 0x6e, 0xfb, 0xce, 0xc3, 0xbd, 0x99, 0x30, 0x1b, 0x3a, 0xf7, 0x68, 0xb8, 0x5f, 0xf2, 0x14, 0x16, - 0xf5, 0xa1, 0x16, 0xd0, 0x44, 0xe2, 0x28, 0x21, 0x3c, 0x73, 0xeb, 0xda, 0xe9, 0xc4, 0x6e, 0x0e, - 0x3d, 0x62, 0x21, 0x96, 0x64, 0xbf, 0xe4, 0xcd, 0xd8, 0xe8, 0x36, 0xac, 0x66, 0x51, 0x64, 0x4d, - 0xed, 0x9d, 0xd3, 0x85, 0x06, 0x06, 0xb8, 0x5f, 0xf2, 0x72, 0x0e, 0xea, 0x42, 0x8d, 0x24, 0xa1, - 0x6e, 0xc0, 0x22, 0x6b, 0x73, 0xef, 0x9e, 0x2e, 0x70, 0x27, 0x87, 0x2a, 0x1f, 0x0a, 0x9e, 0x12, - 0x51, 0x35, 0x26, 0x18, 0x0e, 0x4c, 0xd9, 0x9f, 0x29, 0x72, 0x90, 0x43, 0x95, 0x48, 0xc1, 0x43, - 0x37, 0xa1, 0x92, 0xd0, 0x90, 0xe8, 0x0e, 0x50, 0xdf, 0xb5, 0xcf, 0xe0, 0xd3, 0x50, 0x51, 0x35, - 0x1a, 0x7d, 0x05, 0x75, 0x4e, 0xd8, 0x38, 0x0a, 0xb0, 0x2f, 0x88, 0xd4, 0x1d, 0xb5, 0xbe, 0x7b, - 0xe5, 0x74, 0xb2, 0x67, 0xc0, 0x03, 0x22, 0xf7, 0x4b, 0x1e, 0xf0, 0x62, 0x84, 0xbe, 0x04, 0x08, - 0x8b, 0x37, 0xb0, 0x51, 0x7d, 0x9d, 0xce, 0xec, 0xbd, 0x54, 0x3a, 0x33, 0x66, 0x07, 0xa0, 0xca, - 0xb3, 0xca, 0x68, 0x1d, 0xc1, 0xe6, 0x5c, 0xa1, 0x98, 
0xd3, 0x43, 0x9f, 0xc3, 0xca, 0x44, 0x5b, - 0x59, 0xc5, 0xb4, 0xcf, 0x72, 0x76, 0x9e, 0xe9, 0x65, 0xbc, 0xd6, 0x5f, 0x4b, 0xb0, 0xd1, 0xe5, - 0x34, 0x19, 0x04, 0x3c, 0x62, 0xd2, 0x23, 0x62, 0x32, 0x96, 0xe8, 0x16, 0xd4, 0x84, 0x1e, 0xfb, - 0xa7, 0x7f, 0x3d, 0x9c, 0x4b, 0x4f, 0x9a, 0x55, 0xc3, 0xea, 0xf7, 0xbc, 0xaa, 0xc1, 0xf7, 0x43, - 0xb4, 0x07, 0xb5, 0xe2, 0xc9, 0xc8, 0xca, 0x71, 0xdb, 0x31, 0x5f, 0x64, 0x4e, 0xfe, 0x45, 0xe6, - 0x14, 0xef, 0x84, 0x37, 0x03, 0xa3, 0x6b, 0xb0, 0x4c, 0x38, 0xa7, 0x3c, 0xab, 0xbd, 0x57, 0xbe, - 0xbc, 0x06, 0x81, 0x3e, 0x80, 0x4d, 0x72, 0x4c, 0x82, 0x89, 0x6e, 0xf5, 0x4a, 0xc1, 0x4f, 0x4c, - 0xc5, 0x95, 0xbd, 0xf5, 0x62, 0x41, 0x6d, 0x72, 0x20, 0x90, 0x03, 0x5b, 0x01, 0x8d, 0x59, 0x34, - 0xc6, 0x0b, 0xe8, 0x65, 0x8d, 0xde, 0x9c, 0x5b, 0xca, 0xf0, 0x57, 0x61, 0x7d, 0x38, 0x95, 0x44, - 0xf8, 0x8c, 0xd3, 0x80, 0x08, 0x41, 0x42, 0x5d, 0x46, 0x65, 0x6f, 0x4d, 0x4f, 0xdf, 0xcb, 0x67, - 0xd5, 0x9b, 0xc3, 0x49, 0x40, 0x79, 0x38, 0x0f, 0x5d, 0xd5, 0xd0, 0x8d, 0x6c, 0xa1, 0x00, 0x77, - 0x6e, 0x3f, 0x79, 0x66, 0x97, 0x9e, 0x3e, 0xb3, 0x4b, 0x2f, 0x9e, 0xd9, 0xd6, 0xf7, 0xa9, 0x6d, - 0xfd, 0x9c, 0xda, 0xd6, 0x6f, 0xa9, 0x6d, 0x3d, 0x49, 0x6d, 0xeb, 0xcf, 0xd4, 0xb6, 0xfe, 0x4e, - 0xed, 0xd2, 0x8b, 0xd4, 0xb6, 0x7e, 0x78, 0x6e, 0x97, 0x9e, 0x3c, 0xb7, 0x4b, 0x4f, 0x9f, 0xdb, - 0xa5, 0x6f, 0x57, 0xb3, 0x4f, 0xe2, 0xe1, 0x8a, 0x4e, 0xdd, 0x47, 0xff, 0x06, 0x00, 0x00, 0xff, - 0xff, 0xbb, 0xe3, 0x37, 0x9a, 0x41, 0x0b, 0x00, 0x00, + // 1352 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4d, 0x6f, 0x1b, 0xb7, + 0x16, 0xd5, 0x58, 0xb2, 0x2d, 0x5f, 0x25, 0xfe, 0xa0, 0x93, 0x17, 0xc1, 0xc1, 0x1b, 0xf9, 0xf9, + 0xe5, 0xbd, 0x38, 0x0d, 0x30, 0xd3, 0xb8, 0x41, 0x6b, 0xa4, 0x48, 0x3f, 0x64, 0x25, 0xb5, 0x9a, + 0xc2, 0x08, 0x46, 0x36, 0x0a, 0x74, 0x33, 0xa0, 0x66, 0x68, 0x65, 0x10, 0xcd, 0x90, 0x20, 0xa9, + 0xc0, 0x0a, 0xba, 0xe8, 0x4f, 0x28, 0xd0, 0x1f, 0xd1, 0xf6, 0x9f, 0x74, 0x19, 0x74, 
0x95, 0x02, + 0x85, 0xd1, 0x28, 0x28, 0x50, 0x74, 0x95, 0x4d, 0xf7, 0x05, 0xc9, 0xf9, 0x52, 0x63, 0x3b, 0xc9, + 0xa2, 0x9b, 0x6e, 0xac, 0x4b, 0xf2, 0x9c, 0x33, 0x3c, 0x97, 0x77, 0x2e, 0xc7, 0xf0, 0xb6, 0xe0, + 0x81, 0xfb, 0x28, 0x7a, 0x1c, 0x11, 0xee, 0x0a, 0xc2, 0x1f, 0x45, 0x01, 0x11, 0x6e, 0x4c, 0x24, + 0x0e, 0xb1, 0xc4, 0xae, 0x90, 0x94, 0x13, 0xd6, 0x37, 0xbf, 0x0e, 0xe3, 0x54, 0x52, 0x74, 0x99, + 0x1d, 0x39, 0x86, 0xe0, 0x64, 0x04, 0x27, 0x23, 0xac, 0x5d, 0x18, 0xd0, 0x01, 0xd5, 0x38, 0x57, + 0x45, 0x86, 0xb2, 0xd6, 0x1a, 0x50, 0x3a, 0x18, 0x12, 0x57, 0x8f, 0xfa, 0xa3, 0x43, 0x57, 0x46, + 0x31, 0x11, 0x12, 0xc7, 0x2c, 0x03, 0xa8, 0x5d, 0x60, 0x16, 0x19, 0x84, 0x3b, 0x1a, 0x45, 0x21, + 0xeb, 0xeb, 0x9f, 0x14, 0x70, 0x5b, 0x01, 0x02, 0xcc, 0x13, 0x2a, 0x5d, 0x36, 0xc4, 0x49, 0x42, + 0xb8, 0x1b, 0x8e, 0x13, 0x1c, 0x47, 0x81, 0x2f, 0x39, 0x0e, 0xa2, 0x64, 0xe0, 0x46, 0xdc, 0x1d, + 0xd2, 0x41, 0x14, 0xe0, 0x21, 0xeb, 0x67, 0x51, 0x4a, 0x77, 0x4f, 0xa0, 0x1f, 0x46, 0x43, 0xe2, + 0x0b, 0x3a, 0xe2, 0x01, 0x29, 0x51, 0x53, 0xc2, 0xff, 0x34, 0x81, 0xc6, 0x31, 0x4d, 0xdc, 0x3e, + 0x16, 0xc4, 0x15, 0x12, 0xcb, 0x91, 0xd0, 0xa9, 0x50, 0x41, 0x0a, 0xdb, 0x54, 0x30, 0xf1, 0x00, + 0x73, 0x12, 0xba, 0x0f, 0xb7, 0x8b, 0xc4, 0xb1, 0x7e, 0x1e, 0xa6, 0xc8, 0x2b, 0x25, 0xa4, 0x1c, + 0x33, 0x22, 0xcc, 0x5f, 0xd6, 0x37, 0xbf, 0x06, 0xb5, 0xf1, 0x87, 0x05, 0x8b, 0xfb, 0x1c, 0x07, + 0x84, 0xd1, 0x28, 0x91, 0xdd, 0xe4, 0x90, 0xa2, 0xab, 0x30, 0x13, 0x85, 0x4d, 0x6b, 0xdd, 0xda, + 0x6c, 0x6c, 0x2d, 0x39, 0xec, 0xc8, 0x31, 0xc9, 0x71, 0x0e, 0x0e, 0xba, 0x9d, 0xf6, 0xdc, 0xe4, + 0xb8, 0x35, 0xd3, 0xed, 0x78, 0x33, 0x51, 0x88, 0xfa, 0x00, 0x32, 0xa7, 0x36, 0x67, 0x34, 0xa1, + 0xad, 0x08, 0xc6, 0xb7, 0x93, 0xfa, 0x76, 0xfe, 0x92, 0x36, 0x27, 0xe2, 0x4e, 0xe6, 0xbd, 0x78, + 0x74, 0x87, 0xb0, 0x21, 0x1d, 0xc7, 0x24, 0x91, 0x5e, 0x49, 0x15, 0x21, 0xa8, 0x25, 0x38, 0x26, + 0xcd, 0xea, 0xba, 0xb5, 0xb9, 0xe0, 0xe9, 0x18, 0xb5, 0x61, 0x91, 0x1c, 0x31, 0x12, 0x48, 0x12, + 0xfa, 0x2a, 0x39, 0xa4, 
0x59, 0x5b, 0xb7, 0x36, 0x17, 0xb7, 0x2e, 0xab, 0x67, 0x67, 0x69, 0x73, + 0x3e, 0x8b, 0x0e, 0xc9, 0xce, 0x38, 0x18, 0x92, 0x9e, 0x82, 0x78, 0xe7, 0x33, 0x8a, 0x1e, 0x6e, + 0xfc, 0x6e, 0xc1, 0xe2, 0xdd, 0x68, 0x48, 0x7a, 0xfa, 0x38, 0xde, 0xcc, 0xf7, 0xe7, 0xd0, 0x28, + 0x1d, 0x65, 0x6a, 0xfc, 0xdd, 0x13, 0x8c, 0x97, 0x50, 0xca, 0x74, 0xf1, 0xbc, 0xb2, 0xd9, 0xc3, + 0x7c, 0xf6, 0x6f, 0x33, 0xfb, 0x93, 0x05, 0x17, 0x3f, 0x1e, 0x90, 0x44, 0x16, 0xe9, 0xee, 0x69, + 0x26, 0xba, 0x01, 0xb3, 0x46, 0xd4, 0x7a, 0xb5, 0xa8, 0x41, 0xa2, 0xeb, 0x30, 0x67, 0x10, 0xa9, + 0xf1, 0xd5, 0x29, 0x8e, 0xd1, 0xf5, 0x52, 0x48, 0x9a, 0xd3, 0xea, 0xab, 0x73, 0xfa, 0x1e, 0xd4, + 0xb1, 0xda, 0xa1, 0x1f, 0x85, 0xda, 0xe0, 0x09, 0xf0, 0xc6, 0xe4, 0xb8, 0x35, 0xaf, 0x6d, 0x74, + 0x3b, 0xde, 0xbc, 0x46, 0x77, 0xc3, 0xc2, 0x5b, 0x91, 0xdd, 0x7f, 0x8c, 0xb7, 0x1f, 0x6b, 0xb0, + 0xb0, 0x8f, 0xfb, 0x43, 0x53, 0x9f, 0x59, 0x75, 0x58, 0xa5, 0xea, 0x40, 0x50, 0x0b, 0x89, 0x08, + 0x9a, 0xf3, 0x66, 0x4e, 0xc5, 0xa8, 0x0d, 0x48, 0x48, 0xcc, 0xa5, 0x9f, 0xf7, 0x3c, 0x3f, 0x31, + 0x86, 0xaa, 0xed, 0x0b, 0x93, 0xe3, 0xd6, 0x72, 0x4f, 0xad, 0xee, 0x67, 0x8b, 0x7b, 0x3d, 0x6f, + 0x59, 0x4c, 0xcf, 0x08, 0xf4, 0x21, 0xac, 0x08, 0x49, 0xd9, 0xb4, 0x44, 0x55, 0x4b, 0xac, 0x4e, + 0x8e, 0x5b, 0x4b, 0x3d, 0x49, 0x59, 0x59, 0x61, 0x49, 0x4c, 0x4d, 0x08, 0x74, 0x0f, 0xe6, 0x03, + 0x3a, 0x1c, 0xc5, 0x89, 0x68, 0xd6, 0xd6, 0xab, 0x9b, 0x8d, 0xad, 0x1b, 0xce, 0x19, 0x5d, 0xdc, + 0xc9, 0x5d, 0x3a, 0x3b, 0x9a, 0xa5, 0x42, 0x2f, 0x53, 0x40, 0x36, 0x80, 0x54, 0x00, 0x19, 0x3d, + 0x26, 0x61, 0x73, 0x76, 0xdd, 0xda, 0xac, 0x7b, 0xa5, 0x19, 0x74, 0x1d, 0x56, 0xb2, 0x11, 0x96, + 0x11, 0x4d, 0xfc, 0x87, 0x64, 0xdc, 0x9c, 0xd3, 0x29, 0x59, 0x9e, 0x5a, 0xb8, 0x47, 0xc6, 0xa8, + 0x05, 0x8d, 0x78, 0x24, 0x0d, 0x2e, 0x0a, 0x9b, 0x75, 0x0d, 0x83, 0x6c, 0xaa, 0x1b, 0xae, 0xfd, + 0x6c, 0x01, 0x14, 0xbb, 0x38, 0x31, 0xed, 0x2e, 0x2c, 0xa8, 0x6d, 0xfb, 0xaa, 0x93, 0xea, 0xcc, + 0x2e, 0x6e, 0x21, 0xe5, 0xcf, 0x74, 0xd6, 0x0e, 0x96, 0x78, 
0x7f, 0xcc, 0x88, 0x57, 0x0f, 0xd3, + 0x08, 0x6d, 0xc3, 0x39, 0x86, 0xa5, 0x24, 0x3c, 0x31, 0x9c, 0xaa, 0xe6, 0x5c, 0x2c, 0x38, 0xf7, + 0xcd, 0xaa, 0xa6, 0x35, 0x58, 0x31, 0xc8, 0x4f, 0xb8, 0x56, 0x3a, 0xe1, 0xf7, 0xe1, 0xbc, 0x20, + 0x31, 0x4e, 0xa4, 0x6a, 0xaa, 0x4a, 0x6e, 0x56, 0xcb, 0xfd, 0xab, 0x90, 0xeb, 0xa5, 0xcb, 0x5a, + 0xef, 0x9c, 0x28, 0x8d, 0x36, 0xbe, 0xaf, 0xc2, 0xe2, 0x0e, 0x8d, 0xd9, 0x48, 0xb5, 0x87, 0xe0, + 0x01, 0x89, 0x31, 0xfa, 0x00, 0xe6, 0x74, 0x9a, 0x44, 0xd3, 0xd2, 0x67, 0xf5, 0xff, 0xd7, 0x3b, + 0x2b, 0x2f, 0x65, 0xa1, 0x6f, 0x2c, 0xb8, 0xa4, 0x43, 0x5f, 0x65, 0xc7, 0x97, 0xd4, 0xcf, 0xea, + 0x5d, 0xd5, 0x9d, 0x52, 0xec, 0x9c, 0xa9, 0x38, 0xbd, 0x1d, 0xf3, 0x80, 0x3d, 0x1c, 0x93, 0x7d, + 0x6a, 0x5e, 0x89, 0x50, 0xdc, 0x49, 0x24, 0x1f, 0xb7, 0x2f, 0x4d, 0x8e, 0x5b, 0xab, 0x2f, 0xad, + 0x76, 0x84, 0xb7, 0x2a, 0x5f, 0xa6, 0xac, 0xed, 0x40, 0x3d, 0x03, 0x4c, 0xbd, 0x82, 0xc6, 0xe3, + 0xeb, 0xbd, 0x82, 0x6b, 0x5f, 0x42, 0xf3, 0xb4, 0xed, 0xa0, 0x65, 0xa8, 0xaa, 0x42, 0x33, 0x85, + 0xa1, 0x42, 0xf4, 0x29, 0xcc, 0x3e, 0xc2, 0xc3, 0x51, 0x76, 0x27, 0xdc, 0x7c, 0x13, 0xd7, 0xb9, + 0x19, 0x23, 0x71, 0x6b, 0x66, 0xdb, 0xda, 0xf8, 0xb6, 0x06, 0x8d, 0x7b, 0xdb, 0xc2, 0x23, 0xe6, + 0x12, 0x41, 0x37, 0xa0, 0xca, 0x68, 0x76, 0x47, 0xfd, 0x5b, 0x37, 0x27, 0x7d, 0xc1, 0x3b, 0x0f, + 0xb7, 0x0b, 0x61, 0xd6, 0x77, 0xee, 0xd3, 0x70, 0xb7, 0xe2, 0x29, 0x2c, 0xea, 0xc2, 0x42, 0x40, + 0x13, 0x89, 0xa3, 0x84, 0xf0, 0x74, 0x5b, 0xd7, 0x4e, 0x27, 0xee, 0x64, 0xd0, 0x03, 0x16, 0x62, + 0x49, 0x76, 0x2b, 0x5e, 0xc1, 0x46, 0xb7, 0x61, 0x3e, 0x75, 0x91, 0x76, 0xbd, 0xff, 0x9c, 0x2e, + 0xd4, 0x33, 0xc0, 0xdd, 0x8a, 0x97, 0x71, 0xd0, 0x0e, 0x2c, 0x90, 0x24, 0xd4, 0xb7, 0x8f, 0x48, + 0xfb, 0xe0, 0x7f, 0x4f, 0x17, 0xb8, 0x93, 0x41, 0xd5, 0x1e, 0x72, 0x9e, 0x12, 0x51, 0x35, 0x26, + 0x18, 0x0e, 0x4c, 0xd9, 0x9f, 0x29, 0xb2, 0x97, 0x41, 0x95, 0x48, 0xce, 0x43, 0x37, 0xa1, 0x96, + 0xd0, 0x90, 0xe8, 0x16, 0xd1, 0xd8, 0xb2, 0xcf, 0xe0, 0xd3, 0x50, 0x51, 0x35, 0x1a, 0x7d, 0x02, + 
0x0d, 0x4e, 0xd8, 0x30, 0x0a, 0xb0, 0x2f, 0x88, 0xd4, 0x2d, 0xb7, 0xb1, 0x75, 0xe5, 0x74, 0xb2, + 0x67, 0xc0, 0x3d, 0x22, 0x77, 0x2b, 0x1e, 0xf0, 0x7c, 0x84, 0xee, 0x02, 0x84, 0xf9, 0x07, 0x80, + 0x6e, 0x40, 0x67, 0xea, 0x14, 0x1f, 0x0b, 0x4a, 0xa7, 0x60, 0xb6, 0x01, 0xea, 0x3c, 0xad, 0x8c, + 0x8d, 0x03, 0x58, 0x29, 0x15, 0x8a, 0x39, 0x3d, 0xf4, 0x11, 0xcc, 0x8d, 0x74, 0x94, 0x56, 0xcc, + 0xe6, 0x59, 0x9b, 0x2d, 0x33, 0xbd, 0x94, 0xb7, 0xf1, 0xeb, 0x0c, 0x2c, 0xef, 0x70, 0x9a, 0xf4, + 0x02, 0x1e, 0x31, 0xe9, 0x11, 0x31, 0x1a, 0x4a, 0x74, 0x0b, 0x16, 0x84, 0x1e, 0xfb, 0xa7, 0x7f, + 0x2f, 0x9d, 0x9b, 0x1c, 0xb7, 0xea, 0x86, 0xd5, 0xed, 0x78, 0x75, 0x83, 0xef, 0x86, 0x68, 0x1b, + 0x16, 0xf2, 0x3b, 0x25, 0x2d, 0xc7, 0x35, 0xc7, 0x7c, 0xac, 0x3b, 0xd9, 0xc7, 0xba, 0x93, 0x5f, + 0x24, 0x5e, 0x01, 0x46, 0xd7, 0x60, 0x96, 0x70, 0x4e, 0x79, 0x5a, 0x7b, 0x27, 0x5e, 0xcd, 0x06, + 0x81, 0xde, 0x82, 0x15, 0x72, 0x44, 0x82, 0x91, 0xee, 0xf1, 0x4a, 0xc1, 0x4f, 0x4c, 0xc5, 0x55, + 0xbd, 0xa5, 0x7c, 0x41, 0x3d, 0x64, 0x4f, 0x20, 0x07, 0x56, 0x03, 0x1a, 0xb3, 0x68, 0x88, 0xa7, + 0xd0, 0xb3, 0x1a, 0xbd, 0x52, 0x5a, 0x4a, 0xf1, 0x57, 0x61, 0xa9, 0x3f, 0x96, 0x44, 0xf8, 0x8c, + 0xd3, 0x80, 0x08, 0x41, 0x42, 0x5d, 0x46, 0x55, 0x6f, 0x51, 0x4f, 0xdf, 0xcf, 0x66, 0xd5, 0xa5, + 0xc4, 0x49, 0x40, 0x79, 0x58, 0x86, 0xce, 0x6b, 0xe8, 0x72, 0xba, 0x90, 0x83, 0xdb, 0xb7, 0x9f, + 0x3c, 0xb3, 0x2b, 0x4f, 0x9f, 0xd9, 0x95, 0x17, 0xcf, 0x6c, 0xeb, 0xab, 0x89, 0x6d, 0x7d, 0x37, + 0xb1, 0xad, 0x1f, 0x26, 0xb6, 0xf5, 0x64, 0x62, 0x5b, 0xbf, 0x4c, 0x6c, 0xeb, 0xb7, 0x89, 0x5d, + 0x79, 0x31, 0xb1, 0xad, 0xaf, 0x9f, 0xdb, 0x95, 0x27, 0xcf, 0xed, 0xca, 0xd3, 0xe7, 0x76, 0xe5, + 0x8b, 0xf9, 0xf4, 0xbf, 0xa5, 0xfe, 0x9c, 0x4e, 0xdd, 0x3b, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, + 0xa4, 0x06, 0x48, 0xe3, 0x5c, 0x0d, 0x00, 0x00, } func (this *TracepointInfo) Equal(that interface{}) bool { @@ -857,6 +1007,39 @@ func (this *TracepointInfo) Equal(that interface{}) bool { } return true } +func (this *FileSourceInfo) 
Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FileSourceInfo) + if !ok { + that2, ok := that.(FileSourceInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + if !this.FileSource.Equal(that1.FileSource) { + return false + } + if this.Name != that1.Name { + return false + } + if this.ExpectedState != that1.ExpectedState { + return false + } + return true +} func (this *AgentTracepointStatus) Equal(that interface{}) bool { if that == nil { return this == nil @@ -890,6 +1073,39 @@ func (this *AgentTracepointStatus) Equal(that interface{}) bool { } return true } +func (this *AgentFileSourceStatus) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AgentFileSourceStatus) + if !ok { + that2, ok := that.(AgentFileSourceStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.State != that1.State { + return false + } + if !this.Status.Equal(that1.Status) { + return false + } + if !this.ID.Equal(that1.ID) { + return false + } + if !this.AgentID.Equal(that1.AgentID) { + return false + } + return true +} func (this *TableInfo) Equal(that interface{}) bool { if that == nil { return this == nil @@ -935,6 +1151,9 @@ func (this *TableInfo) Equal(that interface{}) bool { if this.TabletizationKey != that1.TabletizationKey { return false } + if this.MutationId != that1.MutationId { + return false + } return true } func (this *TableInfo_ColumnInfo) Equal(that interface{}) bool { @@ -1344,6 +1563,23 @@ func (this *TracepointInfo) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *FileSourceInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, 
"&storepb.FileSourceInfo{") + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + if this.FileSource != nil { + s = append(s, "FileSource: "+fmt.Sprintf("%#v", this.FileSource)+",\n") + } + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "ExpectedState: "+fmt.Sprintf("%#v", this.ExpectedState)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *AgentTracepointStatus) GoString() string { if this == nil { return "nil" @@ -1363,11 +1599,30 @@ func (this *AgentTracepointStatus) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *AgentFileSourceStatus) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&storepb.AgentFileSourceStatus{") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + if this.Status != nil { + s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + } + if this.ID != nil { + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + } + if this.AgentID != nil { + s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *TableInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 12) s = append(s, "&storepb.TableInfo{") s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") s = append(s, "Desc: "+fmt.Sprintf("%#v", this.Desc)+",\n") @@ -1378,6 +1633,7 @@ func (this *TableInfo) GoString() string { } s = append(s, "Tabletized: "+fmt.Sprintf("%#v", this.Tabletized)+",\n") s = append(s, "TabletizationKey: "+fmt.Sprintf("%#v", this.TabletizationKey)+",\n") + s = append(s, "MutationId: "+fmt.Sprintf("%#v", this.MutationId)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1609,7 +1865,7 @@ func (m *TracepointInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *AgentTracepointStatus) Marshal() 
(dAtA []byte, err error) { +func (m *FileSourceInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1619,31 +1875,31 @@ func (m *AgentTracepointStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AgentTracepointStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *FileSourceInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AgentTracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *FileSourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.AgentID != nil { - { - size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStore(dAtA, i, uint64(size)) - } + if m.ExpectedState != 0 { + i = encodeVarintStore(dAtA, i, uint64(m.ExpectedState)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x20 } - if m.ID != nil { + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintStore(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if m.FileSource != nil { { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.FileSource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1651,11 +1907,11 @@ func (m *AgentTracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintStore(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } - if m.Status != nil { + if m.ID != nil { { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1663,17 +1919,12 @@ func (m *AgentTracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintStore(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - if m.State != 0 { - i = encodeVarintStore(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } 
return len(dAtA) - i, nil } -func (m *TableInfo) Marshal() (dAtA []byte, err error) { +func (m *AgentTracepointStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1683,25 +1934,160 @@ func (m *TableInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TableInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *AgentTracepointStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TableInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentTracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Desc) > 0 { - i -= len(m.Desc) - copy(dAtA[i:], m.Desc) - i = encodeVarintStore(dAtA, i, uint64(len(m.Desc))) - i-- - dAtA[i] = 0x3a - } - if len(m.TabletizationKey) > 0 { - i -= len(m.TabletizationKey) + if m.AgentID != nil { + { + size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.State != 0 { + i = encodeVarintStore(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AgentFileSourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AgentFileSourceStatus) MarshalTo(dAtA []byte) (int, error) 
{ + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AgentFileSourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AgentID != nil { + { + size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ID != nil { + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.State != 0 { + i = encodeVarintStore(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TableInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MutationId) > 0 { + i -= len(m.MutationId) + copy(dAtA[i:], m.MutationId) + i = encodeVarintStore(dAtA, i, uint64(len(m.MutationId))) + i-- + dAtA[i] = 0x42 + } + if len(m.Desc) > 0 { + i -= len(m.Desc) + copy(dAtA[i:], m.Desc) + i = encodeVarintStore(dAtA, i, uint64(len(m.Desc))) + i-- + dAtA[i] = 0x3a + } + if len(m.TabletizationKey) > 0 { + i -= len(m.TabletizationKey) copy(dAtA[i:], m.TabletizationKey) i = encodeVarintStore(dAtA, i, uint64(len(m.TabletizationKey))) i-- @@ -2252,6 +2638,30 @@ func (m *TracepointInfo) Size() (n int) { return n } +func (m 
*FileSourceInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovStore(uint64(l)) + } + if m.FileSource != nil { + l = m.FileSource.Size() + n += 1 + l + sovStore(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovStore(uint64(l)) + } + if m.ExpectedState != 0 { + n += 1 + sovStore(uint64(m.ExpectedState)) + } + return n +} + func (m *AgentTracepointStatus) Size() (n int) { if m == nil { return 0 @@ -2276,6 +2686,30 @@ func (m *AgentTracepointStatus) Size() (n int) { return n } +func (m *AgentFileSourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovStore(uint64(m.State)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovStore(uint64(l)) + } + if m.ID != nil { + l = m.ID.Size() + n += 1 + l + sovStore(uint64(l)) + } + if m.AgentID != nil { + l = m.AgentID.Size() + n += 1 + l + sovStore(uint64(l)) + } + return n +} + func (m *TableInfo) Size() (n int) { if m == nil { return 0 @@ -2309,6 +2743,10 @@ func (m *TableInfo) Size() (n int) { if l > 0 { n += 1 + l + sovStore(uint64(l)) } + l = len(m.MutationId) + if l > 0 { + n += 1 + l + sovStore(uint64(l)) + } return n } @@ -2554,6 +2992,19 @@ func (this *TracepointInfo) String() string { }, "") return s } +func (this *FileSourceInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileSourceInfo{`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `FileSource:` + strings.Replace(fmt.Sprintf("%v", this.FileSource), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ExpectedState:` + fmt.Sprintf("%v", this.ExpectedState) + `,`, + `}`, + }, "") + return s +} func (this *AgentTracepointStatus) String() string { if this == nil { return "nil" @@ -2567,6 +3018,19 @@ func (this *AgentTracepointStatus) String() string { }, "") 
return s } +func (this *AgentFileSourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AgentFileSourceStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, + `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, + `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "UUID", "uuidpb.UUID", 1) + `,`, + `}`, + }, "") + return s +} func (this *TableInfo) String() string { if this == nil { return "nil" @@ -2584,6 +3048,7 @@ func (this *TableInfo) String() string { `Tabletized:` + fmt.Sprintf("%v", this.Tabletized) + `,`, `TabletizationKey:` + fmt.Sprintf("%v", this.TabletizationKey) + `,`, `Desc:` + fmt.Sprintf("%v", this.Desc) + `,`, + `MutationId:` + fmt.Sprintf("%v", this.MutationId) + `,`, `}`, }, "") return s @@ -2940,7 +3405,7 @@ func (m *TracepointInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { +func (m *FileSourceInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2963,34 +3428,15 @@ func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentTracepointStatus: wiretype end group for non-group") + return fmt.Errorf("proto: FileSourceInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentTracepointStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileSourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - m.State |= statuspb.LifeCycleState(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3017,16 +3463,16 @@ func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &statuspb.Status{} + if m.ID == nil { + m.ID = &uuidpb.UUID{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FileSource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3053,18 +3499,18 @@ func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ID == nil { - m.ID = &uuidpb.UUID{} + if m.FileSource == nil { + m.FileSource = &ir.FileSourceDeployment{} } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FileSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStore @@ -3074,23 +3520,392 @@ func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if 
intStringLen < 0 { return ErrInvalidLengthStore } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthStore } if postIndex > l { return io.ErrUnexpectedEOF } - if m.AgentID == nil { - m.AgentID = &uuidpb.UUID{} + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedState", wireType) + } + m.ExpectedState = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedState |= statuspb.LifeCycleState(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStore(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentTracepointStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentTracepointStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= statuspb.LifeCycleState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &statuspb.Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStore 
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &uuidpb.UUID{} + } + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStore(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStore + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AgentFileSourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentFileSourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentFileSourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= statuspb.LifeCycleState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &statuspb.Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStore + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AgentID == nil { + m.AgentID = &uuidpb.UUID{} } if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3334,6 +4149,38 @@ func (m *TableInfo) Unmarshal(dAtA []byte) error { } m.Desc = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MutationId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStore + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStore + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStore + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MutationId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStore(dAtA[iNdEx:]) diff --git a/src/vizier/services/metadata/storepb/store.proto b/src/vizier/services/metadata/storepb/store.proto index 975b04d3685..4e1144a497f 100644 --- a/src/vizier/services/metadata/storepb/store.proto +++ b/src/vizier/services/metadata/storepb/store.proto @@ -26,6 +26,7 @@ import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "src/api/proto/uuidpb/uuid.proto"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; +import "src/carnot/planner/file_source/ir/logical.proto"; import "src/common/base/statuspb/status.proto"; import "src/shared/k8s/metadatapb/metadata.proto"; import "src/shared/types/typespb/types.proto"; @@ -46,6 +47,18 @@ message TracepointInfo { px.statuspb.LifeCycleState expected_state = 4; } +// Information about the status of a specific file source +message FileSourceInfo { + uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; + // The file source deployment. + px.carnot.planner.file_source.ir.FileSourceDeployment file_source = 2; + // The name of the file source, not unique. + string name = 3; + // The desired state of the file source, either running or terminated. The actual + // state of the file source is derived by the states of the individual agent file sources. 
+ px.statuspb.LifeCycleState expected_state = 4; +} + // The agent's registration status for a particular tracepoint. message AgentTracepointStatus { // The state of the tracepoint. @@ -56,6 +69,16 @@ message AgentTracepointStatus { uuidpb.UUID agent_id = 4 [ (gogoproto.customname) = "AgentID" ]; } +// The agent's registration status for a particular file source. +message AgentFileSourceStatus { + // The state of the file source. + px.statuspb.LifeCycleState state = 1; + // The status of the file source, specified if the state of the file source is not healthy. + px.statuspb.Status status = 2; + uuidpb.UUID id = 3 [ (gogoproto.customname) = "ID" ]; + uuidpb.UUID agent_id = 4 [ (gogoproto.customname) = "AgentID" ]; +} + // TableInfo contains info about the table in Vizier. message TableInfo { // Name of the table. @@ -83,6 +106,8 @@ message TableInfo { bool tabletized = 5; // The tabletization key of this schema. string tabletization_key = 6; + // ID of the mutation that created this schema, empty if unrelated to a mutation. + string mutation_id = 8; } // ComputedSchema describes the schema available on Vizier. 
diff --git a/src/vizier/services/query_broker/controllers/BUILD.bazel b/src/vizier/services/query_broker/controllers/BUILD.bazel index 2ccb9f3a1e9..662397ac614 100644 --- a/src/vizier/services/query_broker/controllers/BUILD.bazel +++ b/src/vizier/services/query_broker/controllers/BUILD.bazel @@ -46,6 +46,7 @@ go_library( "//src/carnot/goplanner:go_default_library", "//src/carnot/planner/compilerpb:compiler_status_pl_go_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", + "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/carnot/planner/plannerpb:service_pl_go_proto", "//src/carnot/planpb:plan_pl_go_proto", "//src/carnot/queryresultspb:query_results_pl_go_proto", diff --git a/src/vizier/services/query_broker/controllers/errors.go b/src/vizier/services/query_broker/controllers/errors.go index c07c4eb4f1c..3ce6c74bd1b 100644 --- a/src/vizier/services/query_broker/controllers/errors.go +++ b/src/vizier/services/query_broker/controllers/errors.go @@ -29,4 +29,8 @@ var ( ErrTracepointPending = errors.New("tracepoints are still pending") // ErrConfigUpdateFailed failed to send the config update request to an agent. ErrConfigUpdateFailed = errors.New("failed to update config") + // ErrFileSourceRegistrationFailed failed to register file source. to an agent. + ErrFileSourceRegistrationFailed = errors.New("failed to register file sources") + // ErrFileSourceDeletionFailed failed to delete file source. 
+ ErrFileSourceDeletionFailed = errors.New("failed to delete file sources") ) diff --git a/src/vizier/services/query_broker/controllers/mutation_executor.go b/src/vizier/services/query_broker/controllers/mutation_executor.go index f14ad3028de..abd0e45a267 100644 --- a/src/vizier/services/query_broker/controllers/mutation_executor.go +++ b/src/vizier/services/query_broker/controllers/mutation_executor.go @@ -30,6 +30,7 @@ import ( "px.dev/pixie/src/api/proto/uuidpb" "px.dev/pixie/src/api/proto/vizierpb" "px.dev/pixie/src/carnot/planner/distributedpb" + "px.dev/pixie/src/carnot/planner/file_source/ir" "px.dev/pixie/src/carnot/planner/plannerpb" "px.dev/pixie/src/carnot/planpb" "px.dev/pixie/src/common/base/statuspb" @@ -40,6 +41,7 @@ import ( // TracepointMap stores a map from the name to tracepoint info. type TracepointMap map[string]*TracepointInfo +type FileSourceMap map[string]*FileSourceInfo // MutationExecutor is the interface for running script mutations. type MutationExecutor interface { @@ -51,8 +53,10 @@ type MutationExecutor interface { type MutationExecutorImpl struct { planner Planner mdtp metadatapb.MetadataTracepointServiceClient + mdfs metadatapb.MetadataFileSourceServiceClient mdconf metadatapb.MetadataConfigServiceClient activeTracepoints TracepointMap + activeFileSources FileSourceMap outputTables []string distributedState *distributedpb.DistributedState } @@ -64,19 +68,29 @@ type TracepointInfo struct { Status *statuspb.Status } +type FileSourceInfo struct { + GlobPattern string + TableName string + ID uuid.UUID + Status *statuspb.Status +} + // NewMutationExecutor creates a new mutation executor. 
func NewMutationExecutor( planner Planner, mdtp metadatapb.MetadataTracepointServiceClient, + mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, distributedState *distributedpb.DistributedState, ) MutationExecutor { return &MutationExecutorImpl{ planner: planner, mdtp: mdtp, + mdfs: mdfs, mdconf: mdconf, distributedState: distributedState, activeTracepoints: make(TracepointMap), + activeFileSources: make(FileSourceMap), } } @@ -87,9 +101,27 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut if err != nil { return nil, err } + var otelConfig *distributedpb.OTelEndpointConfig + if convertedReq.Configs != nil && convertedReq.Configs.OTelEndpointConfig != nil { + otelConfig = &distributedpb.OTelEndpointConfig{ + URL: convertedReq.Configs.OTelEndpointConfig.URL, + Headers: convertedReq.Configs.OTelEndpointConfig.Headers, + Insecure: convertedReq.Configs.OTelEndpointConfig.Insecure, + Timeout: convertedReq.Configs.OTelEndpointConfig.Timeout, + } + } + var pluginConfig *distributedpb.PluginConfig + if req.Configs != nil && req.Configs.PluginConfig != nil { + pluginConfig = &distributedpb.PluginConfig{ + StartTimeNs: req.Configs.PluginConfig.StartTimeNs, + EndTimeNs: req.Configs.PluginConfig.EndTimeNs, + } + } convertedReq.LogicalPlannerState = &distributedpb.LogicalPlannerState{ - DistributedState: m.distributedState, - PlanOptions: planOpts, + DistributedState: m.distributedState, + PlanOptions: planOpts, + OTelEndpointConfig: otelConfig, + PluginConfig: pluginConfig, } mutations, err := m.planner.CompileMutations(convertedReq) @@ -118,6 +150,12 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut Names: make([]string, 0), } configmapReqs := make([]*metadatapb.UpdateConfigRequest, 0) + fileSourceReqs := &metadatapb.RegisterFileSourceRequest{ + Requests: make([]*ir.FileSourceDeployment, 0), + } + deleteFileSourcesReq := &metadatapb.RemoveFileSourceRequest{ + Names: 
make([]string, 0), + } outputTablesMap := make(map[string]bool) // TODO(zasgar): We should make sure that we don't simultaneously add and delete the tracepoint. @@ -159,6 +197,34 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut AgentPodName: mut.ConfigUpdate.AgentPodName, }) } + case *plannerpb.CompileMutation_FileSource: + { + name := mut.FileSource.GlobPattern + tableName := mut.FileSource.TableName + fileSourceReqs.Requests = append(fileSourceReqs.Requests, &ir.FileSourceDeployment{ + Name: name, + GlobPattern: name, + TableName: tableName, + TTL: mut.FileSource.TTL, + }) + if _, ok := m.activeFileSources[name]; ok { + return nil, fmt.Errorf("file source with name '%s', already used", name) + } + // TODO(ddelnano): Add unit tests that would have caught the bug with the + // file source output table issue. The line that caused the bug is left commented below: + // outputTablesMap[name] = true + outputTablesMap[tableName] = true + + m.activeFileSources[name] = &FileSourceInfo{ + GlobPattern: mut.FileSource.GlobPattern, + ID: uuid.Nil, + Status: nil, + } + } + case *plannerpb.CompileMutation_DeleteFileSource: + { + deleteFileSourcesReq.Names = append(deleteFileSourcesReq.Names, mut.DeleteFileSource.GlobPattern) + } } } @@ -210,6 +276,44 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut } } + if len(fileSourceReqs.Requests) > 0 { + resp, err := m.mdfs.RegisterFileSource(ctx, fileSourceReqs) + if err != nil { + log.WithError(err). + Errorf("Failed to register file sources") + return nil, ErrFileSourceRegistrationFailed + } + if resp.Status != nil && resp.Status.ErrCode != statuspb.OK { + log.WithField("status", resp.Status.String()). + Errorf("Failed to register file sources with bad status") + return resp.Status, ErrFileSourceRegistrationFailed + } + + // Update the internal stat of the file sources. 
+ for _, fs := range resp.FileSources { + id := utils.UUIDFromProtoOrNil(fs.ID) + m.activeFileSources[fs.Name].ID = id + m.activeFileSources[fs.Name].Status = fs.Status + } + } + if len(deleteFileSourcesReq.Names) > 0 { + delResp, err := m.mdfs.RemoveFileSource(ctx, deleteFileSourcesReq) + if err != nil { + log.WithError(err). + Errorf("Failed to delete tracepoints") + return nil, ErrFileSourceDeletionFailed + } + if delResp.Status != nil && delResp.Status.ErrCode != statuspb.OK { + log.WithField("status", delResp.Status.String()). + Errorf("Failed to delete tracepoints with bad status") + return delResp.Status, ErrFileSourceDeletionFailed + } + // Remove the tracepoints we considered deleted. + for _, fsName := range deleteFileSourcesReq.Names { + delete(m.activeFileSources, fsName) + } + } + m.outputTables = make([]string, 0) for k := range outputTablesMap { m.outputTables = append(m.outputTables, k) @@ -220,11 +324,17 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut // MutationInfo returns the summarized mutation information. 
func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.MutationInfo, error) { - req := &metadatapb.GetTracepointInfoRequest{ + tpReq := &metadatapb.GetTracepointInfoRequest{ IDs: make([]*uuidpb.UUID, 0), } for _, tp := range m.activeTracepoints { - req.IDs = append(req.IDs, utils.ProtoFromUUID(tp.ID)) + tpReq.IDs = append(tpReq.IDs, utils.ProtoFromUUID(tp.ID)) + } + fsReq := &metadatapb.GetFileSourceInfoRequest{ + IDs: make([]*uuidpb.UUID, 0), + } + for _, fs := range m.activeFileSources { + fsReq.IDs = append(fsReq.IDs, utils.ProtoFromUUID(fs.ID)) } aCtx, err := authcontext.FromContext(ctx) if err != nil { @@ -232,28 +342,45 @@ func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.Muta } ctx = metadata.AppendToOutgoingContext(ctx, "authorization", fmt.Sprintf("bearer %s", aCtx.AuthToken)) - resp, err := m.mdtp.GetTracepointInfo(ctx, req) + tpResp, err := m.mdtp.GetTracepointInfo(ctx, tpReq) if err != nil { return nil, err } + fsResp, err := m.mdfs.GetFileSourceInfo(ctx, fsReq) + if err != nil { + return nil, err + } + tps := len(tpResp.Tracepoints) mutationInfo := &vizierpb.MutationInfo{ Status: &vizierpb.Status{Code: 0}, - States: make([]*vizierpb.MutationInfo_MutationState, len(resp.Tracepoints)), + States: make([]*vizierpb.MutationInfo_MutationState, tps+len(fsResp.FileSources)), } - ready := true - for idx, tp := range resp.Tracepoints { + tpReady := true + for idx, tp := range tpResp.Tracepoints { mutationInfo.States[idx] = &vizierpb.MutationInfo_MutationState{ ID: utils.UUIDFromProtoOrNil(tp.ID).String(), State: convertLifeCycleStateToVizierLifeCycleState(tp.State), Name: tp.Name, } if tp.State != statuspb.RUNNING_STATE { - ready = false + tpReady = false + } + } + + fsReady := true + for idx, fs := range fsResp.FileSources { + mutationInfo.States[idx+tps] = &vizierpb.MutationInfo_MutationState{ + ID: utils.UUIDFromProtoOrNil(fs.ID).String(), + State: convertLifeCycleStateToVizierLifeCycleState(fs.State), + Name: 
fs.Name, + } + if fs.State != statuspb.RUNNING_STATE { + fsReady = false } } - if !ready { + if !tpReady { mutationInfo.Status = &vizierpb.Status{ Code: int32(codes.Unavailable), Message: "probe installation in progress", @@ -261,6 +388,14 @@ func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.Muta return mutationInfo, nil } + if !fsReady { + mutationInfo.Status = &vizierpb.Status{ + Code: int32(codes.Unavailable), + Message: "file source installation in progress", + } + return mutationInfo, nil + } + if !m.isSchemaReady() { mutationInfo.Status = &vizierpb.Status{ Code: int32(codes.Unavailable), diff --git a/src/vizier/services/query_broker/controllers/query_executor.go b/src/vizier/services/query_broker/controllers/query_executor.go index 4d8ff7b7b6b..8897034bcaa 100644 --- a/src/vizier/services/query_broker/controllers/query_executor.go +++ b/src/vizier/services/query_broker/controllers/query_executor.go @@ -89,6 +89,7 @@ type DataPrivacy interface { // MutationExecFactory is a function that creates a new MutationExecutorImpl. 
type MutationExecFactory func(Planner, metadatapb.MetadataTracepointServiceClient, + metadatapb.MetadataFileSourceServiceClient, metadatapb.MetadataConfigServiceClient, *distributedpb.DistributedState) MutationExecutor @@ -100,6 +101,7 @@ type QueryExecutorImpl struct { dataPrivacy DataPrivacy natsConn *nats.Conn mdtp metadatapb.MetadataTracepointServiceClient + mdfs metadatapb.MetadataFileSourceServiceClient mdconf metadatapb.MetadataConfigServiceClient resultForwarder QueryResultForwarder planner Planner @@ -127,6 +129,7 @@ func NewQueryExecutorFromServer(s *Server, mutExecFactory MutationExecFactory) Q s.dataPrivacy, s.natsConn, s.mdtp, + s.mdfs, s.mdconf, s.resultForwarder, s.planner, @@ -142,6 +145,7 @@ func NewQueryExecutor( dataPrivacy DataPrivacy, natsConn *nats.Conn, mdtp metadatapb.MetadataTracepointServiceClient, + mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, resultForwarder QueryResultForwarder, planner Planner, @@ -154,6 +158,7 @@ func NewQueryExecutor( dataPrivacy: dataPrivacy, natsConn: natsConn, mdtp: mdtp, + mdfs: mdfs, mdconf: mdconf, resultForwarder: resultForwarder, planner: planner, @@ -292,7 +297,7 @@ func (q *QueryExecutorImpl) getPlanOpts(queryStr string) (*planpb.PlanOptions, e } func (q *QueryExecutorImpl) runMutation(ctx context.Context, resultCh chan<- *vizierpb.ExecuteScriptResponse, req *vizierpb.ExecuteScriptRequest, planOpts *planpb.PlanOptions, distributedState *distributedpb.DistributedState) error { - mutationExec := q.mutationExecFactory(q.planner, q.mdtp, q.mdconf, distributedState) + mutationExec := q.mutationExecFactory(q.planner, q.mdtp, q.mdfs, q.mdconf, distributedState) s, err := mutationExec.Execute(ctx, req, planOpts) if err != nil { diff --git a/src/vizier/services/query_broker/controllers/query_executor_test.go b/src/vizier/services/query_broker/controllers/query_executor_test.go index 1bbe5b35b19..710b54c0f28 100644 --- 
a/src/vizier/services/query_broker/controllers/query_executor_test.go +++ b/src/vizier/services/query_broker/controllers/query_executor_test.go @@ -409,7 +409,7 @@ func runTestCase(t *testing.T, test *queryExecTestCase) { } dp := &fakeDataPrivacy{} - queryExec := controllers.NewQueryExecutor("qb_address", "qb_hostname", at, dp, nc, nil, nil, rf, planner, test.MutExecFactory) + queryExec := controllers.NewQueryExecutor("qb_address", "qb_hostname", at, dp, nc, nil, nil, nil, rf, planner, test.MutExecFactory) consumer := newTestConsumer(test.ConsumeErrs) assert.Equal(t, test.QueryExecExpectedRunError, queryExec.Run(context.Background(), test.Req, consumer)) @@ -806,7 +806,7 @@ func buildMutationFailedQueryTestCase(t *testing.T) queryExecTestCase { QueryExecExpectedWaitError: err, StreamResultsErr: err, StreamResultsCallExpected: true, - MutExecFactory: func(planner controllers.Planner, client metadatapb.MetadataTracepointServiceClient, client2 metadatapb.MetadataConfigServiceClient, state *distributedpb.DistributedState) controllers.MutationExecutor { + MutExecFactory: func(planner controllers.Planner, client metadatapb.MetadataTracepointServiceClient, client2 metadatapb.MetadataFileSourceServiceClient, client3 metadatapb.MetadataConfigServiceClient, state *distributedpb.DistributedState) controllers.MutationExecutor { return &fakeMutationExecutor{ MutInfo: mutInfo, ExecuteStatus: nil, diff --git a/src/vizier/services/query_broker/controllers/server.go b/src/vizier/services/query_broker/controllers/server.go index 9626a8046d0..fae5d15ad91 100644 --- a/src/vizier/services/query_broker/controllers/server.go +++ b/src/vizier/services/query_broker/controllers/server.go @@ -82,6 +82,7 @@ type Server struct { healthcheckQuitOnce sync.Once mdtp metadatapb.MetadataTracepointServiceClient + mdfs metadatapb.MetadataFileSourceServiceClient mdconf metadatapb.MetadataConfigServiceClient resultForwarder QueryResultForwarder @@ -95,9 +96,8 @@ type QueryExecutorFactory func(*Server, 
MutationExecFactory) QueryExecutor // NewServer creates GRPC handlers. func NewServer(env querybrokerenv.QueryBrokerEnv, agentsTracker AgentsTracker, dataPrivacy DataPrivacy, - mds metadatapb.MetadataTracepointServiceClient, mdconf metadatapb.MetadataConfigServiceClient, - natsConn *nats.Conn, queryExecFactory QueryExecutorFactory, -) (*Server, error) { + mds metadatapb.MetadataTracepointServiceClient, mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, + natsConn *nats.Conn, queryExecFactory QueryExecutorFactory) (*Server, error) { var udfInfo udfspb.UDFInfo if err := loadUDFInfo(&udfInfo); err != nil { return nil, err @@ -107,7 +107,7 @@ func NewServer(env querybrokerenv.QueryBrokerEnv, agentsTracker AgentsTracker, d return nil, err } - return NewServerWithForwarderAndPlanner(env, agentsTracker, dataPrivacy, NewQueryResultForwarder(), mds, mdconf, + return NewServerWithForwarderAndPlanner(env, agentsTracker, dataPrivacy, NewQueryResultForwarder(), mds, mdfs, mdconf, natsConn, c, queryExecFactory) } @@ -117,6 +117,7 @@ func NewServerWithForwarderAndPlanner(env querybrokerenv.QueryBrokerEnv, dataPrivacy DataPrivacy, resultForwarder QueryResultForwarder, mds metadatapb.MetadataTracepointServiceClient, + mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, natsConn *nats.Conn, planner Planner, @@ -129,6 +130,7 @@ func NewServerWithForwarderAndPlanner(env querybrokerenv.QueryBrokerEnv, resultForwarder: resultForwarder, natsConn: natsConn, mdtp: mds, + mdfs: mdfs, mdconf: mdconf, planner: planner, queryExecFactory: queryExecFactory, diff --git a/src/vizier/services/query_broker/controllers/server_test.go b/src/vizier/services/query_broker/controllers/server_test.go index 1bd568c2631..2d11071385d 100644 --- a/src/vizier/services/query_broker/controllers/server_test.go +++ b/src/vizier/services/query_broker/controllers/server_test.go @@ -267,7 +267,7 @@ func TestCheckHealth(t 
*testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, queryExecFactory) + s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, nil, queryExecFactory) require.NoError(t, err) err = s.CheckHealth(context.Background()) @@ -392,7 +392,7 @@ func TestExecuteScript(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, queryExecFactory) + s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, nil, queryExecFactory) require.NoError(t, err) // Set up mocks. @@ -456,7 +456,7 @@ func TestTransferResultChunk_AgentStreamComplete(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() @@ -547,7 +547,7 @@ func TestTransferResultChunk_AgentClosedPrematurely(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() @@ -631,7 +631,7 @@ func TestTransferResultChunk_AgentStreamFailed(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() @@ -709,7 +709,7 @@ func TestTransferResultChunk_ClientStreamCancelled(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) + s, err := 
controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() diff --git a/src/vizier/services/query_broker/query_broker_server.go b/src/vizier/services/query_broker/query_broker_server.go index ef037d32f8b..5bc9b74c6ec 100644 --- a/src/vizier/services/query_broker/query_broker_server.go +++ b/src/vizier/services/query_broker/query_broker_server.go @@ -141,6 +141,7 @@ func main() { mdsClient := metadatapb.NewMetadataServiceClient(mdsConn) mdtpClient := metadatapb.NewMetadataTracepointServiceClient(mdsConn) + mdfsClient := metadatapb.NewMetadataFileSourceServiceClient(mdsConn) mdconfClient := metadatapb.NewMetadataConfigServiceClient(mdsConn) csClient := metadatapb.NewCronScriptStoreServiceClient(mdsConn) @@ -170,7 +171,7 @@ func main() { agentTracker := tracker.NewAgents(mdsClient, viper.GetString("jwt_signing_key")) agentTracker.Start() defer agentTracker.Stop() - svr, err := controllers.NewServer(env, agentTracker, dataPrivacy, mdtpClient, mdconfClient, natsConn, controllers.NewQueryExecutorFromServer) + svr, err := controllers.NewServer(env, agentTracker, dataPrivacy, mdtpClient, mdfsClient, mdconfClient, natsConn, controllers.NewQueryExecutorFromServer) if err != nil { log.WithError(err).Fatal("Failed to initialize GRPC server funcs.") } diff --git a/src/vizier/services/query_broker/script_runner/script_runner.go b/src/vizier/services/query_broker/script_runner/script_runner.go index 48f78b9427b..fbe8afae032 100644 --- a/src/vizier/services/query_broker/script_runner/script_runner.go +++ b/src/vizier/services/query_broker/script_runner/script_runner.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "io" + "strings" "sync" "time" @@ -262,13 +263,17 @@ func (r *runner) runScript(scriptPeriod time.Duration) { } } - // We set the time 1 second in the past to cover colletor latency and request latencies + // We set the time 1 second in the past to cover collector latency and request latencies // 
which can cause data overlaps or cause data to be missed. startTime := r.lastRun.Add(-time.Second) endTime := startTime.Add(scriptPeriod) r.lastRun = time.Now() + // TODO(ddelnano): This might not be the correct approach for handling mutations. + // This is done until the pxlog source can work with an indefinite ttl. + hasMutation := strings.Contains(r.cronScript.Script, "pxlog") execScriptClient, err := r.vzClient.ExecuteScript(ctx, &vizierpb.ExecuteScriptRequest{ QueryStr: r.cronScript.Script, + Mutation: hasMutation, Configs: &vizierpb.Configs{ OTelEndpointConfig: otelEndpoint, PluginConfig: &vizierpb.Configs_PluginConfig{ diff --git a/src/vizier/services/query_broker/tracker/agents_info.go b/src/vizier/services/query_broker/tracker/agents_info.go index f881c974832..f36ebe8dd06 100644 --- a/src/vizier/services/query_broker/tracker/agents_info.go +++ b/src/vizier/services/query_broker/tracker/agents_info.go @@ -128,7 +128,7 @@ func (a *AgentsInfoImpl) UpdateAgentsInfo(update *metadatapb.AgentUpdatesRespons } else { // this is a Kelvin kelvinGRPCAddress := agent.Info.IPAddress - carnotInfoMap[agentUUID] = makeKelvinCarnotInfo(agentUUID, kelvinGRPCAddress, agent.ASID) + carnotInfoMap[agentUUID] = makeKelvinCarnotInfo(agentUUID, kelvinGRPCAddress, agent.ASID, agent.Info.Capabilities.StoresData) } } // case 2: agent data info update @@ -197,14 +197,14 @@ func makeAgentCarnotInfo(agentID uuid.UUID, asid uint32, agentMetadata *distribu } } -func makeKelvinCarnotInfo(agentID uuid.UUID, grpcAddress string, asid uint32) *distributedpb.CarnotInfo { +func makeKelvinCarnotInfo(agentID uuid.UUID, grpcAddress string, asid uint32, storesData bool) *distributedpb.CarnotInfo { return &distributedpb.CarnotInfo{ QueryBrokerAddress: agentID.String(), AgentID: utils.ProtoFromUUID(agentID), ASID: asid, HasGRPCServer: true, GRPCAddress: grpcAddress, - HasDataStore: false, + HasDataStore: storesData, ProcessesData: true, AcceptsRemoteSources: true, // When we support persistent storage, 
Kelvins will also have MetadataInfo. diff --git a/src/vizier/services/shared/agentpb/agent.pb.go b/src/vizier/services/shared/agentpb/agent.pb.go index 4cd57a106ea..c7fa65e27bd 100755 --- a/src/vizier/services/shared/agentpb/agent.pb.go +++ b/src/vizier/services/shared/agentpb/agent.pb.go @@ -56,6 +56,7 @@ func (AgentState) EnumDescriptor() ([]byte, []int) { type AgentCapabilities struct { CollectsData bool `protobuf:"varint,1,opt,name=collects_data,json=collectsData,proto3" json:"collects_data,omitempty"` + StoresData bool `protobuf:"varint,2,opt,name=stores_data,json=storesData,proto3" json:"stores_data,omitempty"` } func (m *AgentCapabilities) Reset() { *m = AgentCapabilities{} } @@ -97,6 +98,13 @@ func (m *AgentCapabilities) GetCollectsData() bool { return false } +func (m *AgentCapabilities) GetStoresData() bool { + if m != nil { + return m.StoresData + } + return false +} + type AgentParameters struct { ProfilerStackTraceSamplePeriodMS int32 `protobuf:"varint,1,opt,name=profiler_stack_trace_sample_period_ms,json=profilerStackTraceSamplePeriodMs,proto3" json:"profiler_stack_trace_sample_period_ms,omitempty"` } @@ -483,61 +491,62 @@ func init() { } var fileDescriptor_fef0af3bd5248f34 = []byte{ - // 864 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0xc6, 0x49, 0x6c, 0x4f, 0xe2, 0xc6, 0x9d, 0x46, 0xd4, 0x84, 0x6a, 0x37, 0x72, 0x41, - 0x2a, 0x05, 0xad, 0x51, 0x90, 0xa0, 0x17, 0x40, 0x76, 0xec, 0x62, 0xab, 0x65, 0x63, 0xcd, 0x3a, - 0x41, 0x70, 0x19, 0x8d, 0x77, 0x27, 0xc9, 0xd0, 0xf5, 0xee, 0x68, 0x66, 0x62, 0x55, 0x3d, 0x71, - 0xe4, 0xc8, 0x5f, 0xe0, 0xc6, 0x4f, 0xe1, 0x98, 0x63, 0x4f, 0x16, 0xd9, 0x70, 0xe8, 0xb1, 0x3f, - 0x01, 0xed, 0xdb, 0x75, 0x53, 0xb7, 0x52, 0x93, 0xd3, 0xbe, 0x79, 0xdf, 0xf7, 0xbd, 0x37, 0xf3, - 0xbd, 0x27, 0x1b, 0xb9, 0x5a, 0x05, 0xed, 0x99, 0x78, 0x21, 0xb8, 0x6a, 0x6b, 0xae, 0x66, 0x22, - 0xe0, 0xba, 0xad, 0x4f, 0x99, 0xe2, 0x61, 
0x9b, 0x9d, 0xf0, 0xd8, 0xc8, 0x49, 0xfe, 0x75, 0xa5, - 0x4a, 0x4c, 0x82, 0x1d, 0xf9, 0xdc, 0xcd, 0xe9, 0xee, 0x82, 0xee, 0xe6, 0x74, 0x17, 0x68, 0x3b, - 0xdb, 0x27, 0xc9, 0x49, 0x02, 0xdc, 0x76, 0x16, 0xe5, 0xb2, 0x1d, 0x27, 0x6b, 0xc3, 0xa4, 0x68, - 0xe7, 0xc8, 0xd9, 0x99, 0x08, 0xe5, 0x04, 0x3e, 0x39, 0xa1, 0xf5, 0x08, 0xdd, 0xee, 0x64, 0xfa, - 0x7d, 0x26, 0xd9, 0x44, 0x44, 0xc2, 0x08, 0xae, 0xf1, 0x7d, 0x54, 0x0f, 0x92, 0x28, 0xe2, 0x81, - 0xd1, 0x34, 0x64, 0x86, 0x35, 0xad, 0x5d, 0xeb, 0x41, 0x95, 0x6c, 0x2e, 0x92, 0x3d, 0x66, 0x58, - 0xeb, 0x0f, 0x0b, 0x6d, 0x81, 0x74, 0xc4, 0x14, 0x9b, 0x72, 0xc3, 0x95, 0xc6, 0x67, 0xe8, 0x33, - 0xa9, 0x92, 0x63, 0x11, 0x71, 0x45, 0xb5, 0x61, 0xc1, 0x33, 0x6a, 0x14, 0x0b, 0x38, 0xd5, 0x6c, - 0x2a, 0x23, 0x4e, 0x25, 0x57, 0x22, 0x09, 0xe9, 0x54, 0x43, 0xc1, 0xb5, 0xee, 0xa7, 0xe9, 0xdc, - 0xd9, 0x1d, 0x15, 0x02, 0x3f, 0xe3, 0x8f, 0x33, 0xba, 0x0f, 0xec, 0x11, 0x90, 0x7f, 0xf2, 0xc9, - 0xae, 0xfc, 0x30, 0x43, 0xb7, 0xfe, 0x5b, 0x41, 0x35, 0xb8, 0xca, 0x30, 0x3e, 0x4e, 0xf0, 0xb7, - 0xa8, 0x0a, 0x96, 0x50, 0x11, 0x42, 0x9f, 0x8d, 0xbd, 0x2d, 0x57, 0x3e, 0x77, 0xf3, 0xb7, 0xbb, - 0x87, 0x87, 0xc3, 0x5e, 0x77, 0x23, 0x9d, 0x3b, 0x95, 0x5c, 0xd1, 0x23, 0x15, 0x60, 0x0f, 0x43, - 0xfc, 0x18, 0xd5, 0x4e, 0x13, 0x6d, 0xa8, 0x88, 0x8f, 0x93, 0xe6, 0x0a, 0x28, 0x3f, 0x77, 0xaf, - 0xf1, 0xdd, 0x1d, 0x24, 0x1a, 0xda, 0x92, 0xea, 0x69, 0x11, 0xe1, 0x2f, 0x11, 0x12, 0x92, 0xb2, - 0x30, 0x54, 0x5c, 0xeb, 0x66, 0x79, 0xd7, 0x7a, 0x50, 0xeb, 0xd6, 0xd3, 0xb9, 0x53, 0x1b, 0x8e, - 0x3a, 0x79, 0x92, 0xd4, 0x84, 0x2c, 0x42, 0x7c, 0x84, 0x36, 0x83, 0xb7, 0xcc, 0x6f, 0xae, 0x42, - 0xe3, 0xbd, 0x6b, 0x1b, 0xbf, 0x37, 0x36, 0xb2, 0x54, 0x07, 0x8f, 0x10, 0x92, 0x6f, 0x26, 0xd3, - 0x5c, 0x83, 0xaa, 0x5f, 0xdd, 0xac, 0xea, 0xd5, 0x44, 0xc9, 0x5b, 0x35, 0x5a, 0x01, 0xaa, 0x3f, - 0xe1, 0x2a, 0xe6, 0xd1, 0x11, 0x57, 0x5a, 0x24, 0x31, 0x6e, 0xa2, 0xca, 0x2c, 0x0f, 0xc1, 0xe8, - 0x3a, 0x59, 0x1c, 0xf1, 0x27, 0xa8, 0x36, 0x65, 0xbf, 0x25, 0x8a, 0x2a, 0x3e, 
0x03, 0x2b, 0xeb, - 0xa4, 0x0a, 0x09, 0xc2, 0x67, 0x00, 0x8a, 0xb8, 0x00, 0xcb, 0x05, 0x98, 0x25, 0x08, 0x9f, 0xb5, - 0x5e, 0x59, 0xa8, 0xba, 0xf0, 0x14, 0xef, 0x20, 0x70, 0x35, 0x66, 0x53, 0x0e, 0x1d, 0x6a, 0xe4, - 0xcd, 0x19, 0x7f, 0x8c, 0xaa, 0x32, 0x09, 0x29, 0x60, 0x2b, 0x80, 0x55, 0x64, 0x12, 0x7a, 0x19, - 0x74, 0x1f, 0x55, 0xf2, 0x41, 0xca, 0xc2, 0x7d, 0x94, 0xce, 0x9d, 0x75, 0xa8, 0x3a, 0x22, 0xeb, - 0x30, 0x27, 0x89, 0x1f, 0xa3, 0xf5, 0x67, 0xf0, 0x9a, 0xc2, 0x71, 0xf7, 0x5a, 0x6f, 0x96, 0x1e, - 0x4f, 0x0a, 0x35, 0x7e, 0x84, 0x9a, 0x79, 0x44, 0x4f, 0x39, 0x0b, 0xb9, 0xd2, 0x54, 0xc4, 0xda, - 0xb0, 0x28, 0xe2, 0x21, 0xb8, 0x5e, 0x25, 0x1f, 0xe5, 0xf8, 0x20, 0x87, 0x87, 0x0b, 0xb4, 0x35, - 0xb7, 0xd0, 0x1a, 0xf8, 0x8d, 0xbf, 0x47, 0xab, 0xb0, 0x74, 0xf9, 0xba, 0x3e, 0xbc, 0xd9, 0x94, - 0x60, 0xeb, 0x40, 0x87, 0xbf, 0x41, 0xb7, 0x02, 0xc5, 0x99, 0xe1, 0xd4, 0x88, 0x29, 0xa7, 0xb1, - 0x06, 0x47, 0xca, 0xdd, 0x46, 0x3a, 0x77, 0x36, 0xf7, 0x01, 0x19, 0x8b, 0x29, 0xf7, 0x7c, 0xb2, - 0x19, 0x5c, 0x9d, 0x34, 0xfe, 0x01, 0xdd, 0x8e, 0x98, 0x36, 0xd9, 0xcd, 0x95, 0x99, 0x70, 0x66, - 0x32, 0x69, 0x19, 0xa4, 0x77, 0xd2, 0xb9, 0xb3, 0xf5, 0x94, 0x69, 0x33, 0x58, 0x60, 0x9e, 0x4f, - 0xb6, 0xa2, 0xa5, 0x84, 0xc6, 0xf7, 0xd0, 0x2a, 0xd3, 0x22, 0x04, 0x0b, 0xeb, 0xdd, 0x6a, 0x3a, - 0x77, 0x56, 0x3b, 0xfe, 0xb0, 0x47, 0x20, 0xdb, 0xfa, 0xcb, 0x42, 0x1b, 0x70, 0x55, 0xdf, 0x30, - 0x73, 0xa6, 0xf1, 0x01, 0xba, 0x1b, 0x6b, 0xaa, 0x45, 0x1c, 0x70, 0xba, 0xdc, 0x17, 0x5e, 0x5e, - 0xee, 0x36, 0xd3, 0xb9, 0xb3, 0xed, 0xf9, 0x7e, 0xc6, 0x58, 0xea, 0x4d, 0xb6, 0x63, 0xfd, 0x7e, - 0x16, 0x77, 0xd0, 0x9a, 0x36, 0xcc, 0xe4, 0x0b, 0x70, 0x6b, 0xef, 0x8b, 0x9b, 0x19, 0x97, 0xdd, - 0x86, 0x93, 0x5c, 0xf9, 0xf0, 0x05, 0x42, 0x57, 0x49, 0x7c, 0x17, 0xdd, 0xe9, 0xfc, 0xd8, 0xf7, - 0xc6, 0xd4, 0x1f, 0x77, 0xc6, 0x7d, 0x7a, 0xe8, 0x3d, 0xf1, 0x0e, 0x7e, 0xf6, 0x1a, 0xa5, 0x77, - 0x81, 0x41, 0xbf, 0xf3, 0x74, 0x3c, 0xf8, 0xa5, 0x61, 0xe1, 0x7b, 0xa8, 0xb9, 0xac, 0x20, 0x7d, - 0x7f, 0x74, 0xe0, 
0xf9, 0xc3, 0xa3, 0x7e, 0x63, 0xe5, 0x5d, 0xb4, 0x37, 0xf4, 0xf7, 0x0f, 0x3c, - 0xaf, 0xbf, 0x3f, 0xee, 0xf7, 0x1a, 0xe5, 0xee, 0x77, 0xe7, 0x17, 0x76, 0xe9, 0xe5, 0x85, 0x5d, - 0x7a, 0x7d, 0x61, 0x5b, 0xbf, 0xa7, 0xb6, 0xf5, 0x77, 0x6a, 0x5b, 0xff, 0xa4, 0xb6, 0x75, 0x9e, - 0xda, 0xd6, 0xbf, 0xa9, 0x6d, 0xbd, 0x4a, 0xed, 0xd2, 0xeb, 0xd4, 0xb6, 0xfe, 0xbc, 0xb4, 0x4b, - 0xe7, 0x97, 0x76, 0xe9, 0xe5, 0xa5, 0x5d, 0xfa, 0xb5, 0x52, 0xfc, 0x3f, 0x4c, 0xd6, 0xe1, 0x27, - 0xfc, 0xeb, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe9, 0xec, 0x47, 0x21, 0x4c, 0x06, 0x00, 0x00, + // 879 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x41, 0x6f, 0xdb, 0x36, + 0x14, 0xb6, 0xe2, 0x24, 0xb6, 0x5f, 0xe2, 0xc6, 0x65, 0x83, 0xd5, 0xcb, 0x0a, 0x29, 0x70, 0x37, + 0xa0, 0xeb, 0x06, 0x79, 0xc8, 0x80, 0x6d, 0x97, 0x6d, 0xb0, 0x63, 0x77, 0x36, 0xda, 0x29, 0x06, + 0xe5, 0x64, 0xe8, 0x2e, 0x02, 0x2d, 0x31, 0x09, 0x57, 0x59, 0x12, 0x48, 0xc6, 0x28, 0x7a, 0xda, + 0x71, 0xc7, 0xfd, 0x85, 0xdd, 0xf6, 0x53, 0x76, 0xcc, 0xb1, 0x27, 0x63, 0x51, 0x76, 0xe8, 0xb1, + 0x3f, 0x61, 0xd0, 0x93, 0xdc, 0xd4, 0x2d, 0xd0, 0xe4, 0x24, 0xf2, 0x7d, 0xdf, 0xf7, 0x3e, 0xf2, + 0x7b, 0x84, 0x0d, 0xb6, 0x92, 0x7e, 0x7b, 0x26, 0x5e, 0x08, 0x2e, 0xdb, 0x8a, 0xcb, 0x99, 0xf0, + 0xb9, 0x6a, 0xab, 0x53, 0x26, 0x79, 0xd0, 0x66, 0x27, 0x3c, 0xd2, 0xc9, 0x24, 0xff, 0xda, 0x89, + 0x8c, 0x75, 0x4c, 0xac, 0xe4, 0xb9, 0x9d, 0xd3, 0xed, 0x05, 0xdd, 0xce, 0xe9, 0x36, 0xd2, 0x76, + 0xb6, 0x4f, 0xe2, 0x93, 0x18, 0xb9, 0xed, 0x6c, 0x95, 0xcb, 0x76, 0xac, 0xcc, 0x86, 0x25, 0xa2, + 0x9d, 0x23, 0x67, 0x67, 0x22, 0x48, 0x26, 0xf8, 0xc9, 0x09, 0xad, 0xa7, 0x70, 0xbb, 0x93, 0xe9, + 0xf7, 0x59, 0xc2, 0x26, 0x22, 0x14, 0x5a, 0x70, 0x45, 0xee, 0x43, 0xdd, 0x8f, 0xc3, 0x90, 0xfb, + 0x5a, 0x79, 0x01, 0xd3, 0xac, 0x69, 0xec, 0x1a, 0x0f, 0xaa, 0x74, 0x73, 0x51, 0xec, 0x31, 0xcd, + 0x88, 0x05, 0x1b, 0x4a, 0xc7, 0x92, 0x17, 0x94, 0x15, 0xa4, 0x40, 0x5e, 0xca, 0x08, 0xad, 0x3f, + 0x0c, 
0xd8, 0xc2, 0xde, 0x23, 0x26, 0xd9, 0x94, 0x6b, 0x2e, 0x15, 0x39, 0x83, 0xcf, 0x12, 0x19, + 0x1f, 0x8b, 0x90, 0x4b, 0x4f, 0x69, 0xe6, 0x3f, 0xf3, 0xb4, 0x64, 0x3e, 0xf7, 0x14, 0x9b, 0x26, + 0x21, 0xf7, 0x12, 0x2e, 0x45, 0x1c, 0x78, 0x53, 0x85, 0x8e, 0x6b, 0xdd, 0x4f, 0xd3, 0xb9, 0xb5, + 0x3b, 0x2a, 0x04, 0x6e, 0xc6, 0x1f, 0x67, 0x74, 0x17, 0xd9, 0x23, 0x24, 0xff, 0xec, 0xd2, 0xdd, + 0xe4, 0xc3, 0x0c, 0xd5, 0xfa, 0x6f, 0x05, 0x6a, 0x78, 0x94, 0x61, 0x74, 0x1c, 0x93, 0x6f, 0xa1, + 0x8a, 0x99, 0x79, 0x22, 0x40, 0x9f, 0x8d, 0xbd, 0x2d, 0x3b, 0x79, 0x6e, 0xe7, 0xe1, 0xd8, 0x87, + 0x87, 0xc3, 0x5e, 0x77, 0x23, 0x9d, 0x5b, 0x95, 0x5c, 0xd1, 0xa3, 0x15, 0x64, 0x0f, 0x03, 0xf2, + 0x08, 0x6a, 0xa7, 0xb1, 0xd2, 0x9e, 0x88, 0x8e, 0x63, 0xbc, 0xf0, 0xc6, 0xde, 0xe7, 0xf6, 0x35, + 0x83, 0xb1, 0x07, 0xb1, 0x42, 0x5b, 0x5a, 0x3d, 0x2d, 0x56, 0xe4, 0x4b, 0x00, 0x91, 0x78, 0x2c, + 0x08, 0x24, 0x57, 0xaa, 0x59, 0xde, 0x35, 0x1e, 0xd4, 0xba, 0xf5, 0x74, 0x6e, 0xd5, 0x86, 0xa3, + 0x4e, 0x5e, 0xa4, 0x35, 0x91, 0x14, 0x4b, 0x72, 0x04, 0x9b, 0xfe, 0x5b, 0xd3, 0x69, 0xae, 0xa2, + 0xf1, 0xde, 0xb5, 0xc6, 0xef, 0xcd, 0x95, 0x2e, 0xf5, 0x21, 0x23, 0x80, 0xe4, 0xcd, 0x64, 0x9a, + 0x6b, 0xd8, 0xf5, 0xab, 0x9b, 0x75, 0xbd, 0x9a, 0x28, 0x7d, 0xab, 0x47, 0xcb, 0x87, 0xfa, 0x63, + 0x2e, 0x23, 0x1e, 0x1e, 0x71, 0xa9, 0x44, 0x1c, 0x91, 0x26, 0x54, 0x66, 0xf9, 0x12, 0x83, 0xae, + 0xd3, 0xc5, 0x96, 0x7c, 0x02, 0xb5, 0x29, 0xfb, 0x2d, 0x96, 0x9e, 0xe4, 0x33, 0x8c, 0xb2, 0x4e, + 0xab, 0x58, 0xa0, 0x7c, 0x86, 0xa0, 0x88, 0x0a, 0xb0, 0x5c, 0x80, 0x59, 0x81, 0xf2, 0x59, 0xeb, + 0x95, 0x01, 0xd5, 0x45, 0xa6, 0x64, 0x07, 0x30, 0xd5, 0x88, 0x4d, 0x39, 0x3a, 0xd4, 0xe8, 0x9b, + 0x3d, 0xf9, 0x18, 0xaa, 0x49, 0x1c, 0x78, 0x88, 0xad, 0x20, 0x56, 0x49, 0xe2, 0xc0, 0xc9, 0xa0, + 0xfb, 0x50, 0xc9, 0x07, 0x99, 0x14, 0xe9, 0x43, 0x3a, 0xb7, 0xd6, 0xb1, 0xeb, 0x88, 0xae, 0xe3, + 0x9c, 0x12, 0xf2, 0x08, 0xd6, 0x9f, 0xe1, 0x6d, 0x8a, 0xc4, 0xed, 0x6b, 0xb3, 0x59, 0xba, 0x3c, + 0x2d, 0xd4, 0xe4, 0x3b, 0x68, 0xe6, 0x2b, 
0xef, 0x94, 0xb3, 0x80, 0x4b, 0xe5, 0x89, 0x48, 0x69, + 0x16, 0x86, 0x3c, 0xc0, 0xd4, 0xab, 0xf4, 0xa3, 0x1c, 0x1f, 0xe4, 0xf0, 0x70, 0x81, 0xb6, 0xe6, + 0x06, 0xac, 0x61, 0xde, 0xe4, 0x07, 0x58, 0xc5, 0x47, 0x97, 0x3f, 0xd7, 0x87, 0x37, 0x9b, 0x12, + 0xbe, 0x3a, 0xd4, 0x91, 0x6f, 0xe0, 0x96, 0x2f, 0x39, 0xd3, 0xdc, 0xd3, 0x62, 0xca, 0xbd, 0x48, + 0x61, 0x22, 0xe5, 0x6e, 0x23, 0x9d, 0x5b, 0x9b, 0xfb, 0x88, 0x8c, 0xc5, 0x94, 0x3b, 0x2e, 0xdd, + 0xf4, 0xaf, 0x76, 0x8a, 0xfc, 0x08, 0xb7, 0x43, 0xa6, 0x74, 0x76, 0x72, 0xa9, 0x27, 0x9c, 0xe9, + 0x4c, 0x5a, 0x46, 0xe9, 0x9d, 0x74, 0x6e, 0x6d, 0x3d, 0x61, 0x4a, 0x0f, 0x16, 0x98, 0xe3, 0xd2, + 0xad, 0x70, 0xa9, 0xa0, 0xc8, 0x3d, 0x58, 0x65, 0x4a, 0x04, 0x18, 0x61, 0xbd, 0x5b, 0x4d, 0xe7, + 0xd6, 0x6a, 0xc7, 0x1d, 0xf6, 0x28, 0x56, 0x5b, 0x7f, 0x19, 0xb0, 0x81, 0x47, 0x75, 0x35, 0xd3, + 0x67, 0x8a, 0x1c, 0xc0, 0xdd, 0x48, 0x79, 0x4a, 0x44, 0x3e, 0xf7, 0x96, 0x7d, 0xf1, 0xe6, 0xe5, + 0x6e, 0x33, 0x9d, 0x5b, 0xdb, 0x8e, 0xeb, 0x66, 0x8c, 0x25, 0x6f, 0xba, 0x1d, 0xa9, 0xf7, 0xab, + 0xa4, 0x03, 0x6b, 0x4a, 0x33, 0x9d, 0x3f, 0x80, 0x5b, 0x7b, 0x5f, 0xdc, 0x2c, 0xb8, 0xec, 0x34, + 0x9c, 0xe6, 0xca, 0x87, 0x2f, 0x00, 0xae, 0x8a, 0xe4, 0x2e, 0xdc, 0xe9, 0xfc, 0xd4, 0x77, 0xc6, + 0x9e, 0x3b, 0xee, 0x8c, 0xfb, 0xde, 0xa1, 0xf3, 0xd8, 0x39, 0xf8, 0xc5, 0x69, 0x94, 0xde, 0x05, + 0x06, 0xfd, 0xce, 0x93, 0xf1, 0xe0, 0x69, 0xc3, 0x20, 0xf7, 0xa0, 0xb9, 0xac, 0xa0, 0x7d, 0x77, + 0x74, 0xe0, 0xb8, 0xc3, 0xa3, 0x7e, 0x63, 0xe5, 0x5d, 0xb4, 0x37, 0x74, 0xf7, 0x0f, 0x1c, 0xa7, + 0xbf, 0x3f, 0xee, 0xf7, 0x1a, 0xe5, 0xee, 0xf7, 0xe7, 0x17, 0x66, 0xe9, 0xe5, 0x85, 0x59, 0x7a, + 0x7d, 0x61, 0x1a, 0xbf, 0xa7, 0xa6, 0xf1, 0x77, 0x6a, 0x1a, 0xff, 0xa4, 0xa6, 0x71, 0x9e, 0x9a, + 0xc6, 0xbf, 0xa9, 0x69, 0xbc, 0x4a, 0xcd, 0xd2, 0xeb, 0xd4, 0x34, 0xfe, 0xbc, 0x34, 0x4b, 0xe7, + 0x97, 0x66, 0xe9, 0xe5, 0xa5, 0x59, 0xfa, 0xb5, 0x52, 0xfc, 0x81, 0x4c, 0xd6, 0xf1, 0x37, 0xfe, + 0xeb, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfa, 0x58, 0xba, 0x29, 0x6d, 0x06, 
0x00, 0x00, } func (x AgentState) String() string { @@ -569,6 +578,9 @@ func (this *AgentCapabilities) Equal(that interface{}) bool { if this.CollectsData != that1.CollectsData { return false } + if this.StoresData != that1.StoresData { + return false + } return true } func (this *AgentParameters) Equal(that interface{}) bool { @@ -761,9 +773,10 @@ func (this *AgentCapabilities) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&agentpb.AgentCapabilities{") s = append(s, "CollectsData: "+fmt.Sprintf("%#v", this.CollectsData)+",\n") + s = append(s, "StoresData: "+fmt.Sprintf("%#v", this.StoresData)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -881,6 +894,16 @@ func (m *AgentCapabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StoresData { + i-- + if m.StoresData { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.CollectsData { i-- if m.CollectsData { @@ -1207,6 +1230,9 @@ func (m *AgentCapabilities) Size() (n int) { if m.CollectsData { n += 2 } + if m.StoresData { + n += 2 + } return n } @@ -1346,6 +1372,7 @@ func (this *AgentCapabilities) String() string { } s := strings.Join([]string{`&AgentCapabilities{`, `CollectsData:` + fmt.Sprintf("%v", this.CollectsData) + `,`, + `StoresData:` + fmt.Sprintf("%v", this.StoresData) + `,`, `}`, }, "") return s @@ -1481,6 +1508,26 @@ func (m *AgentCapabilities) Unmarshal(dAtA []byte) error { } } m.CollectsData = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StoresData", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StoresData = bool(v != 0) default: iNdEx = preIndex skippy, err := skipAgent(dAtA[iNdEx:]) diff --git 
a/src/vizier/services/shared/agentpb/agent.proto b/src/vizier/services/shared/agentpb/agent.proto index b95cb1def0f..1e7586d039e 100644 --- a/src/vizier/services/shared/agentpb/agent.proto +++ b/src/vizier/services/shared/agentpb/agent.proto @@ -28,6 +28,7 @@ import "src/api/proto/uuidpb/uuid.proto"; // AgentCapabilities describes functions that the agent has available. message AgentCapabilities { bool collects_data = 1; + bool stores_data = 2; } message AgentParameters { From 9ab6ecfbe09f7451df8192bcae7b796df8f7438a Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 08:51:01 +0200 Subject: [PATCH 43/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/image-replace.sh | 130 ++++++++++++++++++ vizier-chart/templates/03_vizier_etcd.yaml | 12 +- .../templates/04_vizier_persistent.yaml | 12 +- vizier-chart/templates/05_vizier_etcd_ap.yaml | 12 +- .../templates/06_vizier_persistent_ap.yaml | 12 +- vizier-chart/templates/image-replace.sh | 130 ------------------ vizier-chart/values.yaml | 4 +- 7 files changed, 157 insertions(+), 155 deletions(-) create mode 100755 vizier-chart/image-replace.sh delete mode 100755 vizier-chart/templates/image-replace.sh diff --git a/vizier-chart/image-replace.sh b/vizier-chart/image-replace.sh new file mode 100755 index 00000000000..445e18b79ad --- /dev/null +++ b/vizier-chart/image-replace.sh @@ -0,0 +1,130 @@ +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +00_secrets.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +01_nats.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ 
.Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +02_etcd.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +03_vizier_etcd.yaml + + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag 
}}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +04_vizier_persistent.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +05_vizier_etcd_ap.yaml + +sed -i '' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +06_vizier_persistent_ap.yaml + + + + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +00_secrets.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 
's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +01_nats.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +02_etcd.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry 
}}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +03_vizier_etcd.yaml + + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +04_vizier_persistent.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ 
+05_vizier_etcd_ap.yaml + +sed -i '' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ +-e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ +06_vizier_persistent_ap.yaml \ No newline at end of file diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml index cda9fa2a789..30f0c069f2d 100644 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ b/vizier-chart/templates/03_vizier_etcd.yaml @@ -1363,7 +1363,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1567,7 +1567,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ 
.Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1732,7 +1732,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1923,7 +1923,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2142,7 +2142,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry 
}}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2264,7 +2264,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index 87ca06b8448..e0a58d6b40c 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1391,7 +1391,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1595,7 +1595,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry 
}}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1778,7 +1778,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1961,7 +1961,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2176,7 +2176,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2298,7 +2298,7 @@ spec: - configMapRef: 
name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml index 55f4a473bc1..5ad0a66a0d7 100644 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -1363,7 +1363,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1567,7 +1567,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1732,7 +1732,7 @@ spec: envFrom: - 
configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1923,7 +1923,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2142,7 +2142,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2285,7 +2285,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ 
.Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml index d0eb7e59127..42f68fdf58c 100644 --- a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -1391,7 +1391,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' name: app ports: - containerPort: 59300 @@ -1595,7 +1595,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1778,7 +1778,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag 
}}{{else}}ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -1961,7 +1961,7 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' livenessProbe: httpGet: path: /healthz @@ -2176,7 +2176,7 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' name: pem resources: limits: @@ -2319,7 +2319,7 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag 
}}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' name: provisioner securityContext: allowPrivilegeEscalation: false diff --git a/vizier-chart/templates/image-replace.sh b/vizier-chart/templates/image-replace.sh deleted file mode 100755 index 2d1dd4f9746..00000000000 --- a/vizier-chart/templates/image-replace.sh +++ /dev/null @@ -1,130 +0,0 @@ -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -00_secrets.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -01_nats.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -02_etcd.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag 
}}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -03_vizier_etcd.yaml - - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -04_vizier_persistent.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -05_vizier_etcd_ap.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -06_vizier_persistent_ap.yaml - - - - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -00_secrets.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -01_nats.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ 
-02_etcd.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -03_vizier_etcd.yaml - - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -04_vizier_persistent.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ 
.Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -05_vizier_etcd_ap.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|ghcr.io/k8sstormcenter/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|ghcr.io/k8sstormcenter/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|ghcr.io/k8sstormcenter/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -06_vizier_persistent_ap.yaml \ No newline at end of file diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml index 7676b4be8d8..2659a2b9b88 100644 --- a/vizier-chart/values.yaml +++ b/vizier-chart/values.yaml @@ -1,5 +1,7 @@ deployKey: clusterName: honeypixie 
+cloudAddr: devCloudNamespace: plc namespace: pl -imageTag: 2025-05-07_08-37-30.237_UTC \ No newline at end of file +imageTag: 2025-05-07_08-37-30.237_UTC +imageRegistry: mbgurcay \ No newline at end of file From 04b3e5fcf69004151b8fb7735a30f70aa60aa200 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 08:52:15 +0200 Subject: [PATCH 44/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml index 2659a2b9b88..b1b37a0e826 100644 --- a/vizier-chart/values.yaml +++ b/vizier-chart/values.yaml @@ -1,6 +1,6 @@ deployKey: clusterName: honeypixie -cloudAddr: +cloudAddr: getcosmic.ai devCloudNamespace: plc namespace: pl imageTag: 2025-05-07_08-37-30.237_UTC From 726abbc4a5b9708fe4a63d996b868df00cb0d03c Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 09:18:09 +0200 Subject: [PATCH 45/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/templates/03_vizier_etcd.yaml | 46 +++++++++---------- vizier-chart/templates/05_vizier_etcd_ap.yaml | 46 +++++++++---------- .../templates/06_vizier_persistent_ap.yaml | 46 +++++++++---------- 3 files changed, 69 insertions(+), 69 deletions(-) diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml index 30f0c069f2d..7a3d0b0b3bb 100644 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ b/vizier-chart/templates/03_vizier_etcd.yaml @@ -1,27 +1,27 @@ {{if and (not .Values.autopilot) .Values.useEtcdOperator}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} 
- {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +# --- +# apiVersion: v1 +# kind: ServiceAccount +# metadata: +# annotations: +# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# labels: +# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# app: pl-monitoring +# component: vizier +# vizier-bootstrap: "true" +# name: cloud-conn-service-account +# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: v1 kind: ServiceAccount diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml index 5ad0a66a0d7..b1dd66136b3 100644 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -1,27 +1,27 @@ {{if and (.Values.autopilot) (.Values.useEtcdOperator)}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ 
.Release.Namespace }}{{ else }}pl{{ end }} +# --- +# apiVersion: v1 +# kind: ServiceAccount +# metadata: +# annotations: +# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# labels: +# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# app: pl-monitoring +# component: vizier +# vizier-bootstrap: "true" +# name: cloud-conn-service-account +# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: v1 kind: ServiceAccount diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml index 42f68fdf58c..5bbc39f1681 100644 --- a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -1,27 +1,27 @@ {{if and (.Values.autopilot) (not .Values.useEtcdOperator)}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +# --- +# apiVersion: v1 +# kind: ServiceAccount +# metadata: +# annotations: +# {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# labels: +# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# app: pl-monitoring +# component: vizier +# vizier-bootstrap: "true" +# name: cloud-conn-service-account +# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: v1 kind: ServiceAccount From 40e7191535fb25a452bf76a939a55dae880aad54 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 09:18:50 +0200 Subject: [PATCH 46/86] feature: testing dockerhub Signed-off-by: entlein --- .../templates/04_vizier_persistent.yaml | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index e0a58d6b40c..1c4e5e3e169 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1,27 +1,27 @@ {{if and (not .Values.autopilot) (not .Values.useEtcdOperator)}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +# --- +# apiVersion: v1 +# kind: 
ServiceAccount +# metadata: +# annotations: +# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# labels: +# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} +# {{ $kv := split "=" $element -}} +# {{if eq (len $kv) 2 -}} +# {{ $kv._0 }}: "{{ $kv._1 }}" +# {{- end}} +# {{end}}{{end}} +# app: pl-monitoring +# component: vizier +# vizier-bootstrap: "true" +# name: cloud-conn-service-account +# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: v1 kind: ServiceAccount From 90f12028a06e07480fc22673276d2c222b171aaa Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 09:21:22 +0200 Subject: [PATCH 47/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/templates/03_vizier_etcd.yaml | 114 +----------------- .../templates/04_vizier_persistent.yaml | 113 ----------------- vizier-chart/templates/05_vizier_etcd_ap.yaml | 113 ----------------- .../templates/06_vizier_persistent_ap.yaml | 113 ----------------- 4 files changed, 1 insertion(+), 452 deletions(-) diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml index 7a3d0b0b3bb..58c12bdf62b 100644 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ b/vizier-chart/templates/03_vizier_etcd.yaml @@ -1,117 +1,5 @@ {{if and (not .Values.autopilot) .Values.useEtcdOperator}} -# --- -# apiVersion: v1 -# kind: ServiceAccount -# metadata: -# annotations: -# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# labels: -# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} -# {{ $kv := split "=" $element -}} -# {{if eq 
(len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# app: pl-monitoring -# component: vizier -# vizier-bootstrap: "true" -# name: cloud-conn-service-account -# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," 
.Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} + --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index 1c4e5e3e169..f41b22cbd93 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1,117 +1,4 @@ {{if and (not .Values.autopilot) (not .Values.useEtcdOperator)}} -# --- -# apiVersion: v1 -# kind: ServiceAccount -# metadata: -# annotations: -# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# labels: -# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# app: pl-monitoring -# component: vizier -# vizier-bootstrap: "true" -# 
name: cloud-conn-service-account -# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - 
component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml index b1dd66136b3..23c71c5c494 100644 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -1,117 +1,4 @@ {{if and (.Values.autopilot) (.Values.useEtcdOperator)}} -# --- -# apiVersion: v1 -# kind: ServiceAccount -# metadata: -# annotations: -# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# labels: -# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# app: pl-monitoring -# component: vizier -# vizier-bootstrap: "true" -# name: cloud-conn-service-account -# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml index 5bbc39f1681..2be5b0e57d1 100644 --- a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -1,117 +1,4 @@ {{if and (.Values.autopilot) (not .Values.useEtcdOperator)}} -# --- -# apiVersion: v1 -# kind: ServiceAccount -# metadata: -# annotations: -# {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# labels: -# {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} -# {{ $kv := split "=" $element -}} -# {{if eq (len $kv) 2 -}} -# {{ $kv._0 }}: "{{ $kv._1 }}" -# {{- end}} -# {{end}}{{end}} -# app: pl-monitoring -# component: vizier -# vizier-bootstrap: "true" -# name: cloud-conn-service-account -# namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: 
"{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element 
-}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role From b750d89c1f4bf35de5ebde6722cdbafb2b533b06 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 09:50:17 +0200 Subject: [PATCH 48/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/templates/03_vizier_etcd.yaml | 114 +++++++++++++++++- .../templates/04_vizier_persistent.yaml | 113 +++++++++++++++++ vizier-chart/templates/05_vizier_etcd_ap.yaml | 113 +++++++++++++++++ .../templates/06_vizier_persistent_ap.yaml | 113 +++++++++++++++++ 4 files changed, 452 insertions(+), 1 deletion(-) diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml index 58c12bdf62b..30f0c069f2d 100644 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ b/vizier-chart/templates/03_vizier_etcd.yaml @@ -1,5 +1,117 @@ {{if and (not .Values.autopilot) .Values.useEtcdOperator}} - +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace 
}}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: 
pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index f41b22cbd93..e0a58d6b40c 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1,5 +1,118 @@ {{if and (not .Values.autopilot) (not .Values.useEtcdOperator)}} --- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml index 23c71c5c494..5ad0a66a0d7 100644 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -1,5 +1,118 @@ {{if and (.Values.autopilot) (.Values.useEtcdOperator)}} --- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range 
$element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + 
labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml index 2be5b0e57d1..42f68fdf58c 100644 --- a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -1,5 +1,118 @@ {{if and (.Values.autopilot) (not .Values.useEtcdOperator)}} --- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: cloud-conn-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: 
pl-monitoring + component: vizier + name: metadata-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-cert-provisioner-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + vizier-bootstrap: "true" + name: pl-updater-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ $kv._1 }}" + {{- end}} + {{end}}{{end}} + labels: + {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} + {{ $kv := split "=" $element -}} + {{if eq (len $kv) 2 -}} + {{ $kv._0 }}: "{{ 
$kv._1 }}" + {{- end}} + {{end}}{{end}} + app: pl-monitoring + component: vizier + name: query-broker-service-account + namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: From 75e1cf22c88f9f8d6fbf38f6a7b1998f36cb48c6 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 10:48:30 +0200 Subject: [PATCH 49/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/values.yaml | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml index b1b37a0e826..813ca567001 100644 --- a/vizier-chart/values.yaml +++ b/vizier-chart/values.yaml @@ -3,5 +3,37 @@ clusterName: honeypixie cloudAddr: getcosmic.ai devCloudNamespace: plc namespace: pl -imageTag: 2025-05-07_08-37-30.237_UTC -imageRegistry: mbgurcay \ No newline at end of file +imageTag: 2025-05-09_14-20-42.033_UTC +imageRegistry: mbgurcay + + +kubectl get secret -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite secret {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get svc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite svc {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get sa -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl sa --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get cm -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl cm --overwrite {} 
meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get pvc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl pvc --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get clusterrole -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrole --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get clusterrolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get role -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl role --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get rolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl rolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get ds -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl ds --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get deployment -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl deployment --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get statefulset -n pl -o json | jq -r '.items[] | 
select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl statefulset --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl + + + +kubectl get sa -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl sa --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get svc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl svc --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get secret -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl secret --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get cm -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl cm --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get pvc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl pvc --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get clusterrole -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrole --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get clusterrolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrolebinding --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get role -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl role --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get 
rolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl rolebinding --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get ds -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl ds --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get deployment -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl deployment --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get statefulset -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl statefulset --overwrite {} app.kubernetes.io/managed-by=Helm + + + From a52ae3856855b02c0e72f6eecac98583e14ba4ca Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 10:53:06 +0200 Subject: [PATCH 50/86] feature: testing dockerhub Signed-off-by: entlein --- vizier-chart/templates/04_vizier_persistent.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index e0a58d6b40c..7fa8e5feb9a 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1391,7 +1391,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: app ports: - containerPort: 59300 @@ -1595,7 +1596,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if 
.Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1778,7 +1780,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz From b4d31f94ffb18799527833d02a7569fb97133d60 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:01:37 +0200 Subject: [PATCH 51/86] feature: vizier persistent now working with our image Signed-off-by: entlein --- vizier-chart/templates/04_vizier_persistent.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml index 7fa8e5feb9a..1f306913ac2 100644 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ b/vizier-chart/templates/04_vizier_persistent.yaml @@ -1964,7 +1964,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -2179,7 +2180,8 @@ spec: optional: true - name: PL_CLOCK_CONVERTER 
value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: pem resources: limits: @@ -2301,7 +2303,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: provisioner securityContext: allowPrivilegeEscalation: false From 3dfa7a61e3b0c017f0c25299209cb8fd7f183dbe Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:02:12 +0200 Subject: [PATCH 52/86] feature: vizier persistent now working with our image Signed-off-by: entlein --- vizier-chart/helm-labels.sh | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 vizier-chart/helm-labels.sh diff --git a/vizier-chart/helm-labels.sh b/vizier-chart/helm-labels.sh new file mode 100644 index 00000000000..a469ad27619 --- /dev/null +++ b/vizier-chart/helm-labels.sh @@ -0,0 +1,31 @@ + +kubectl get secret -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite secret {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get svc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite svc {} meta.helm.sh/release-name=pixie 
meta.helm.sh/release-namespace=pl +kubectl get sa -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl sa --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get cm -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl cm --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get pvc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl pvc --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get clusterrole -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrole --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get clusterrolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get role -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl role --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get rolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl rolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get ds -n pl -o json | jq -r '.items[] | select(.metadata.annotations | 
has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl ds --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get deployment -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl deployment --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl +kubectl get statefulset -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl statefulset --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl + + + +kubectl get sa -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl sa --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get svc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl svc --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get secret -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl secret --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get cm -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl cm --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get pvc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl pvc --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get clusterrole -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrole --overwrite {} 
app.kubernetes.io/managed-by=Helm +kubectl get clusterrolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrolebinding --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get role -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl role --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get rolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl rolebinding --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get ds -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl ds --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get deployment -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl deployment --overwrite {} app.kubernetes.io/managed-by=Helm +kubectl get statefulset -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl statefulset --overwrite {} app.kubernetes.io/managed-by=Helm + + + From cef03b8bbdfd3f891612f18c767d9e101f1cf6cd Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:04:24 +0200 Subject: [PATCH 53/86] feature: vizier etcd now working with our image Signed-off-by: entlein --- vizier-chart/templates/03_vizier_etcd.yaml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml index 30f0c069f2d..87ccb522974 100644 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ b/vizier-chart/templates/03_vizier_etcd.yaml @@ -1363,7 +1363,8 @@ 
spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: app ports: - containerPort: 59300 @@ -1567,7 +1568,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1732,7 +1734,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1923,7 +1926,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -2142,7 +2146,8 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter 
}}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: pem resources: limits: @@ -2264,7 +2269,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: provisioner securityContext: allowPrivilegeEscalation: false From 0e4d43cce176b3d63c09b2c084efd7a827c725c2 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:16:33 +0200 Subject: [PATCH 54/86] feature: vizier etcd now working with our image Signed-off-by: entlein --- vizier-chart/templates/05_vizier_etcd_ap.yaml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml index 5ad0a66a0d7..3c246bd3b11 100644 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ b/vizier-chart/templates/05_vizier_etcd_ap.yaml @@ -1363,7 +1363,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: app ports: - containerPort: 59300 @@ -1567,7 +1568,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if 
.Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1732,7 +1734,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1923,7 +1926,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -2142,7 +2146,8 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: pem resources: limits: @@ -2285,7 +2290,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ 
.Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: provisioner securityContext: allowPrivilegeEscalation: false From 8a3d8e2ca51ed7ff377ec3c16aafaa4cfbcd4e3c Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:19:38 +0200 Subject: [PATCH 55/86] feature: vizier persistent ap now working with our image Signed-off-by: entlein --- .../templates/06_vizier_persistent_ap.yaml | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml index 42f68fdf58c..99940e52bd5 100644 --- a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ b/vizier-chart/templates/06_vizier_persistent_ap.yaml @@ -1391,7 +1391,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: app ports: - containerPort: 59300 @@ -1595,7 +1596,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1778,7 +1780,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if 
.Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -1961,7 +1964,8 @@ spec: envFrom: - configMapRef: name: pl-tls-config - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz @@ -2176,7 +2180,8 @@ spec: optional: true - name: PL_CLOCK_CONVERTER value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: pem resources: limits: @@ -2319,7 +2324,8 @@ spec: - configMapRef: name: pl-cluster-config optional: true - image: '{{ if .Values.registry }}{{ .Values.registry }}/{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{else}}{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}{{end}}' + image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' + imagePullPolicy: Always name: provisioner securityContext: allowPrivilegeEscalation: false From a54fd5e4d633c3defd9efaaf89a82bc4949aab82 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:20:35 +0200 Subject: [PATCH 
56/86] feature: vizier persistent ap now working with our image Signed-off-by: entlein --- vizier-chart/image-replace.sh | 130 ---------------------------------- vizier-chart/values.yaml | 34 +-------- 2 files changed, 1 insertion(+), 163 deletions(-) delete mode 100755 vizier-chart/image-replace.sh diff --git a/vizier-chart/image-replace.sh b/vizier-chart/image-replace.sh deleted file mode 100755 index 445e18b79ad..00000000000 --- a/vizier-chart/image-replace.sh +++ /dev/null @@ -1,130 +0,0 @@ -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -00_secrets.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry 
}}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -01_nats.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -02_etcd.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -03_vizier_etcd.yaml - - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -04_vizier_persistent.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -05_vizier_etcd_ap.yaml - -sed -i '' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io-pixie-oss-pixie-prod-vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -06_vizier_persistent_ap.yaml - - - - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -00_secrets.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -01_nats.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ 
.Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -02_etcd.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -03_vizier_etcd.yaml - - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag 
}}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -04_vizier_persistent.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -05_vizier_etcd_ap.yaml - -sed -i '' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-pem_image:0.14.15|{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-kelvin_image:0.14.15|{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-metadata_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-query_broker_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}|g' \ --e 's|gcr.io/pixie-oss/pixie-prod/vizier-cloud_connector_server_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}|g' \ --e 
's|gcr.io/pixie-oss/pixie-prod/vizier-cert_provisioner_image:0.14.15|{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}|g' \ -06_vizier_persistent_ap.yaml \ No newline at end of file diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml index 813ca567001..7cb5ab1ca11 100644 --- a/vizier-chart/values.yaml +++ b/vizier-chart/values.yaml @@ -1,39 +1,7 @@ -deployKey: +deployKey: clusterName: honeypixie cloudAddr: getcosmic.ai devCloudNamespace: plc namespace: pl imageTag: 2025-05-09_14-20-42.033_UTC imageRegistry: mbgurcay - - -kubectl get secret -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite secret {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get svc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite svc {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get sa -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl sa --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get cm -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl cm --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get pvc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl pvc --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get clusterrole -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' 
| xargs -I {} kubectl annotate -n pl clusterrole --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get clusterrolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get role -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl role --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get rolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl rolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get ds -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl ds --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get deployment -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl deployment --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get statefulset -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl statefulset --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl - - - -kubectl get sa -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl sa --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get svc -n pl -o json | 
jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl svc --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get secret -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl secret --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get cm -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl cm --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get pvc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl pvc --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get clusterrole -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrole --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get clusterrolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrolebinding --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get role -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl role --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get rolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl rolebinding --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get ds -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl ds --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get deployment -n pl -o json | 
jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl deployment --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get statefulset -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl statefulset --overwrite {} app.kubernetes.io/managed-by=Helm - - - From 698cb0debd28e49e0f1702ec3291d3a7399b53d1 Mon Sep 17 00:00:00 2001 From: entlein Date: Mon, 12 May 2025 11:30:35 +0200 Subject: [PATCH 57/86] feature: adding helm installer Signed-off-by: entlein --- vizier-chart/{helm-labels.sh => helm-install.sh} | 9 +++++++++ vizier-chart/values.yaml | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) rename vizier-chart/{helm-labels.sh => helm-install.sh} (95%) diff --git a/vizier-chart/helm-labels.sh b/vizier-chart/helm-install.sh similarity index 95% rename from vizier-chart/helm-labels.sh rename to vizier-chart/helm-install.sh index a469ad27619..5d14b60d58a 100644 --- a/vizier-chart/helm-labels.sh +++ b/vizier-chart/helm-install.sh @@ -1,4 +1,5 @@ +#First you install pixie via px deploy, then run this script kubectl get secret -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite secret {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl kubectl get svc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite svc {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl kubectl get sa -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl sa --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl @@ -27,5 +28,13 @@ kubectl get ds -n 
pl -o json | jq '.items[] | .metadata | select(.labels."ap kubectl get deployment -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl deployment --overwrite {} app.kubernetes.io/managed-by=Helm kubectl get statefulset -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl statefulset --overwrite {} app.kubernetes.io/managed-by=Helm +keyid=f60a3c55-91fe-4dbc-b984-bf6ed4fdc323 +key=$(px api-key get $keyid) +if [ ! -f myvalues.yaml ]; then + echo "Error: myvalues.yaml not found" + exit 1 +fi + +helm upgrade --install pixie . --namespace pl --create-namespace --values myvalues.yaml diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml index 7cb5ab1ca11..e2eee6365bb 100644 --- a/vizier-chart/values.yaml +++ b/vizier-chart/values.yaml @@ -1,4 +1,4 @@ -deployKey: +deployKey: $PIXIE_DEPLOY_KEY clusterName: honeypixie cloudAddr: getcosmic.ai devCloudNamespace: plc From 20ad8d1f9855f91813ac71ed86b6954b30f21877 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sat, 6 Sep 2025 04:05:32 +0000 Subject: [PATCH 58/86] Update clickhouse to log to stdout. 
Stil having issues with test timeout Signed-off-by: Dom Del Nano --- src/carnot/exec/BUILD.bazel | 19 ++ .../exec/clickhouse_source_node_test.cc | 252 ++++++++++++++++++ .../testing/container_images/BUILD.bazel | 21 +- .../clickhouse_logging_config.xml | 7 + 4 files changed, 298 insertions(+), 1 deletion(-) create mode 100644 src/carnot/exec/clickhouse_source_node_test.cc create mode 100644 src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse_logging_config.xml diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index 228b352501c..f704f070f43 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -300,3 +300,22 @@ pl_cc_test( "@com_github_grpc_grpc//:grpc++_test", ], ) + +pl_cc_test( + name = "clickhouse_source_node_test", + srcs = ["clickhouse_source_node_test.cc"], + data = [ + "//src/stirling/source_connectors/socket_tracer/testing/container_images:clickhouse.tar", + ], + tags = [ + "requires_docker", + "exclusive", + ], + deps = [ + ":cc_library", + ":exec_node_test_helpers", + ":test_utils", + "//src/common/testing/test_utils:cc_library", + "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", + ], +) diff --git a/src/carnot/exec/clickhouse_source_node_test.cc b/src/carnot/exec/clickhouse_source_node_test.cc new file mode 100644 index 00000000000..c79d178f433 --- /dev/null +++ b/src/carnot/exec/clickhouse_source_node_test.cc @@ -0,0 +1,252 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "src/common/testing/test_utils/container_runner.h" +#include "src/common/testing/testing.h" +#include "src/common/base/logging.h" + +namespace px { +namespace carnot { +namespace exec { + +using ::testing::_; +using ::testing::ElementsAre; + +class ClickHouseSourceNodeTest : public ::testing::Test { + protected: + static constexpr char kClickHouseImage[] = + "src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse.tar"; + static constexpr char kClickHouseReadyMessage[] = "Ready for connections"; + static constexpr int kClickHousePort = 9000; + + void SetUp() override { + clickhouse_server_ = std::make_unique( + px::testing::BazelRunfilePath(kClickHouseImage), "clickhouse_test", kClickHouseReadyMessage); + + // Start ClickHouse server with necessary options + std::vector options = { + absl::Substitute("--publish=$0:$0", kClickHousePort), + "--env=CLICKHOUSE_PASSWORD=test_password", + "--network=host", + }; + + ASSERT_OK(clickhouse_server_->Run( + std::chrono::seconds{60}, // timeout + options, + {}, // args + true, // use_host_pid_namespace + std::chrono::seconds{300} // container_lifetime + )); + + // Give ClickHouse more time to fully initialize + std::this_thread::sleep_for(std::chrono::seconds(5)); + + // Create ClickHouse client with retry logic (using default auth) + clickhouse::ClientOptions client_options; + client_options.SetHost("localhost"); + client_options.SetPort(kClickHousePort); + client_options.SetUser("default"); + client_options.SetPassword("test_password"); + client_options.SetDefaultDatabase("default"); + + // Retry connection a few times + const int kMaxRetries = 5; + for (int i = 0; i < kMaxRetries; ++i) { + LOG(INFO) << "Attempting to connect to 
ClickHouse (attempt " << (i + 1) + << "/" << kMaxRetries << ")..."; + try { + client_ = std::make_unique(client_options); + // Test the connection with a simple query + client_->Execute("SELECT 1"); + break; // Connection successful + } catch (const std::exception& e) { + LOG(WARNING) << "Failed to connect to ClickHouse (attempt " << (i + 1) + << "/" << kMaxRetries << "): " << e.what(); + if (i < kMaxRetries - 1) { + std::this_thread::sleep_for(std::chrono::seconds(2)); + } else { + throw; // Re-throw on last attempt + } + } + } + + // Create test table + CreateTestTable(); + } + + void TearDown() override { + if (client_) { + client_.reset(); + } + if (clickhouse_server_) { + clickhouse_server_->Wait(); + } + } + + void CreateTestTable() { + try { + // Drop table if exists + client_->Execute("DROP TABLE IF EXISTS test_table"); + + // Create test table + client_->Execute(R"( + CREATE TABLE test_table ( + id UInt64, + name String, + value Float64, + timestamp DateTime + ) ENGINE = MergeTree() + ORDER BY id + )"); + + // Insert test data + auto id_col = std::make_shared(); + auto name_col = std::make_shared(); + auto value_col = std::make_shared(); + auto timestamp_col = std::make_shared(); + + // Add test data + std::time_t now = std::time(nullptr); + id_col->Append(1); + name_col->Append("test1"); + value_col->Append(10.5); + timestamp_col->Append(now); + + id_col->Append(2); + name_col->Append("test2"); + value_col->Append(20.5); + timestamp_col->Append(now); + + id_col->Append(3); + name_col->Append("test3"); + value_col->Append(30.5); + timestamp_col->Append(now); + + clickhouse::Block block; + block.AppendColumn("id", id_col); + block.AppendColumn("name", name_col); + block.AppendColumn("value", value_col); + block.AppendColumn("timestamp", timestamp_col); + + client_->Insert("test_table", block); + + LOG(INFO) << "Test table created and populated successfully"; + } catch (const std::exception& e) { + LOG(ERROR) << "Failed to create test table: " << e.what(); + 
throw; + } + } + + std::unique_ptr clickhouse_server_; + std::unique_ptr client_; +}; + +TEST_F(ClickHouseSourceNodeTest, BasicQuery) { + // Test basic SELECT query + std::vector ids; + std::vector names; + std::vector values; + + client_->Select("SELECT id, name, value FROM test_table ORDER BY id", + [&](const clickhouse::Block& block) { + for (size_t i = 0; i < block.GetRowCount(); ++i) { + ids.push_back(block[0]->As()->At(i)); + names.emplace_back(block[1]->As()->At(i)); + values.push_back(block[2]->As()->At(i)); + } + } + ); + + EXPECT_THAT(ids, ElementsAre(1, 2, 3)); + EXPECT_THAT(names, ElementsAre("test1", "test2", "test3")); + EXPECT_THAT(values, ElementsAre(10.5, 20.5, 30.5)); +} + +TEST_F(ClickHouseSourceNodeTest, FilteredQuery) { + // Test SELECT with WHERE clause + std::vector ids; + std::vector names; + + client_->Select("SELECT id, name FROM test_table WHERE value > 15.0 ORDER BY id", + [&](const clickhouse::Block& block) { + for (size_t i = 0; i < block.GetRowCount(); ++i) { + ids.push_back(block[0]->As()->At(i)); + names.emplace_back(block[1]->As()->At(i)); + } + } + ); + + EXPECT_THAT(ids, ElementsAre(2, 3)); + EXPECT_THAT(names, ElementsAre("test2", "test3")); +} + +TEST_F(ClickHouseSourceNodeTest, AggregateQuery) { + // Test aggregate functions + double sum_value = 0; + uint64_t count = 0; + bool query_executed = false; + + try { + client_->Select("SELECT SUM(value), COUNT(*) FROM test_table", + [&](const clickhouse::Block& block) { + if (block.GetRowCount() > 0) { + sum_value = block[0]->As()->At(0); + count = block[1]->As()->At(0); + query_executed = true; + } + } + ); + } catch (const std::exception& e) { + LOG(ERROR) << "Aggregate query failed: " << e.what(); + FAIL() << "Aggregate query failed: " << e.what(); + } + + EXPECT_TRUE(query_executed) << "Query callback was not executed"; + EXPECT_DOUBLE_EQ(sum_value, 61.5); // 10.5 + 20.5 + 30.5 + EXPECT_EQ(count, 3); +} + +TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { + // Test query that 
returns no rows + int row_count = 0; + + client_->Select("SELECT * FROM test_table WHERE id > 1000", + [&](const clickhouse::Block& block) { + row_count += block.GetRowCount(); + } + ); + + EXPECT_EQ(row_count, 0); +} + +} // namespace exec +} // namespace carnot +} // namespace px diff --git a/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel b/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel index fb6c1a02e56..8b0a3e3ee2b 100644 --- a/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel +++ b/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel @@ -14,11 +14,15 @@ # # SPDX-License-Identifier: Apache-2.0 +load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer") load("//bazel:pl_build_system.bzl", "pl_boringcrypto_go_sdk", "pl_cc_test_library", "pl_go_sdk_version_template_to_label", "pl_go_test_versions", "pl_supported_go_sdk_versions") pl_all_supported_go_sdk_versions = pl_supported_go_sdk_versions + pl_boringcrypto_go_sdk -package(default_visibility = ["//src/stirling:__subpackages__"]) +package(default_visibility = [ + "//src/stirling:__subpackages__", + "//src/carnot:__subpackages__", +]) pl_cc_test_library( name = "bssl_container", @@ -416,3 +420,18 @@ pl_cc_test_library( ], deps = ["//src/common/testing/test_utils:cc_library"], ) + +# ClickHouse configuration layer for console logging +container_layer( + name = "clickhouse_config_layer", + files = ["clickhouse_logging_config.xml"], + mode = "0644", + directory = "/etc/clickhouse-server/config.d", +) + +container_image( + name = "clickhouse", + base = "@clickhouse_server_base_image//image", + layers = [":clickhouse_config_layer"], + visibility = ["//visibility:public"], +) diff --git a/src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse_logging_config.xml 
b/src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse_logging_config.xml new file mode 100644 index 00000000000..c2d570a3b02 --- /dev/null +++ b/src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse_logging_config.xml @@ -0,0 +1,7 @@ + + + true + + + + \ No newline at end of file From 3285d25faca4a0e6e60f54fd4fbaf81d8db31275 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 7 Sep 2025 05:31:39 +0000 Subject: [PATCH 59/86] Initial start to working clickhouse_source_node. It queries all the data at once Signed-off-by: Dom Del Nano --- bazel/container_images.bzl | 11 +- bazel/external/clickhouse_cpp.BUILD | 64 +++ bazel/repositories.bzl | 1 + bazel/repository_locations.bzl | 5 + src/carnot/exec/BUILD.bazel | 4 + src/carnot/exec/clickhouse_source_node.cc | 368 ++++++++++++++++++ src/carnot/exec/clickhouse_source_node.h | 96 +++++ .../exec/clickhouse_source_node_test.cc | 325 +++++++++++----- src/carnot/plan/operators.cc | 30 ++ src/carnot/plan/operators.h | 35 ++ src/carnot/planpb/plan.proto | 27 ++ src/carnot/planpb/test_proto.h | 51 +++ 12 files changed, 917 insertions(+), 100 deletions(-) create mode 100644 bazel/external/clickhouse_cpp.BUILD create mode 100644 src/carnot/exec/clickhouse_source_node.cc create mode 100644 src/carnot/exec/clickhouse_source_node.h diff --git a/bazel/container_images.bzl b/bazel/container_images.bzl index ab81087c1d6..e31088b2471 100644 --- a/bazel/container_images.bzl +++ b/bazel/container_images.bzl @@ -14,7 +14,7 @@ # # SPDX-License-Identifier: Apache-2.0 -load("@io_bazel_rules_docker//container:container.bzl", "container_pull") +load("@io_bazel_rules_docker//container:container.bzl", "container_pull", "container_image", "container_layer") # When adding an image here, first add it to scripts/regclient/regbot_deps.yaml # Once that is in, trigger the github workflow that mirrors the required image @@ -367,3 +367,12 @@ def stirling_test_images(): repository = 
"golang_1_22_grpc_server_with_buildinfo", digest = "sha256:67adba5e8513670fa37bd042862e7844f26239e8d2997ed8c3b0aa527bc04cc3", ) + + # ClickHouse server image for testing. + # clickhouse/clickhouse-server:25.7-alpine + container_pull( + name = "clickhouse_server_base_image", + registry = "docker.io", + repository = "clickhouse/clickhouse-server", + digest = "sha256:60c53a520a1caad6555eb6772a8a9c91bb09774c1c7ec87e3371ea3da254eeab", + ) diff --git a/bazel/external/clickhouse_cpp.BUILD b/bazel/external/clickhouse_cpp.BUILD new file mode 100644 index 00000000000..625dfb16ee1 --- /dev/null +++ b/bazel/external/clickhouse_cpp.BUILD @@ -0,0 +1,64 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +load("@rules_foreign_cc//foreign_cc:defs.bzl", "cmake") + +licenses(["notice"]) + +exports_files(["LICENSE"]) + +filegroup( + name = "all", + srcs = glob(["**"]), +) + +cmake( + name = "clickhouse_cpp", + build_args = [ + "--", # <- Pass remaining options to the native tool. 
+ "-j`nproc`", + "-l`nproc`", + ], + cache_entries = { + "BUILD_BENCHMARK": "OFF", + "BUILD_TESTS": "OFF", + "BUILD_SHARED_LIBS": "OFF", + "CMAKE_BUILD_TYPE": "Release", + "WITH_OPENSSL": "OFF", # Disable OpenSSL for now + "WITH_SYSTEM_ABSEIL": "OFF", # Use bundled abseil + "WITH_SYSTEM_LZ4": "OFF", # Use bundled for now + "WITH_SYSTEM_CITYHASH": "OFF", # Use bundled for now + "WITH_SYSTEM_ZSTD": "OFF", # Use bundled for now + "CMAKE_POSITION_INDEPENDENT_CODE": "ON", + }, + lib_source = ":all", + out_static_libs = [ + "libclickhouse-cpp-lib.a", + "liblz4.a", + "libcityhash.a", + "libzstdstatic.a", + "libabsl_int128.a", + ], + targets = [ + "clickhouse-cpp-lib", + "lz4", + "cityhash", + "zstdstatic", + "absl_int128", + ], + visibility = ["//visibility:public"], + working_directory = "", +) \ No newline at end of file diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 55d23e61323..1ce7f3db77e 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -147,6 +147,7 @@ def _cc_deps(): _bazel_repo("com_github_ariafallah_csv_parser", build_file = "//bazel/external:csv_parser.BUILD") _bazel_repo("com_github_arun11299_cpp_jwt", build_file = "//bazel/external:cpp_jwt.BUILD") _bazel_repo("com_github_cameron314_concurrentqueue", build_file = "//bazel/external:concurrentqueue.BUILD") + _bazel_repo("com_github_clickhouse_clickhouse_cpp", build_file = "//bazel/external:clickhouse_cpp.BUILD") _bazel_repo("com_github_cyan4973_xxhash", build_file = "//bazel/external:xxhash.BUILD") _bazel_repo("com_github_nlohmann_json", build_file = "//bazel/external:nlohmann_json.BUILD") _bazel_repo("com_github_packetzero_dnsparser", build_file = "//bazel/external:dnsparser.BUILD") diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 3252648e6d7..3e93af8621e 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -71,6 +71,11 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "concurrentqueue-1.0.3", urls = 
["https://github.com/cameron314/concurrentqueue/archive/refs/tags/v1.0.3.tar.gz"], ), + com_github_clickhouse_clickhouse_cpp = dict( + sha256 = "1029a1bb0da8a72db1662a0418267742e66c82bb3e6b0ed116623a2fa8c65a58", + strip_prefix = "clickhouse-cpp-22dc9441cd807156511c6dcf97b1b878bd663d77", + urls = ["https://github.com/ClickHouse/clickhouse-cpp/archive/22dc9441cd807156511c6dcf97b1b878bd663d77.tar.gz"], + ), com_github_cyan4973_xxhash = dict( sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", strip_prefix = "xxHash-0.7.3", diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index f704f070f43..e845cf563d3 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -35,6 +35,7 @@ pl_cc_library( hdrs = [ "exec_node.h", "exec_state.h", + "clickhouse_source_node.h", ], deps = [ "//src/carnot/carnotpb:carnot_pl_cc_proto", @@ -46,6 +47,7 @@ pl_cc_library( "//src/shared/types:cc_library", "//src/table_store/table:cc_library", "@com_github_apache_arrow//:arrow", + "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", "@com_github_grpc_grpc//:grpc++", "@com_github_opentelemetry_proto//:logs_service_grpc_cc", "@com_github_opentelemetry_proto//:metrics_service_grpc_cc", @@ -315,7 +317,9 @@ pl_cc_test( ":cc_library", ":exec_node_test_helpers", ":test_utils", + "//src/carnot/planpb:plan_testutils", "//src/common/testing/test_utils:cc_library", "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", ], + timeout = "long", ) diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc new file mode 100644 index 00000000000..675435943a1 --- /dev/null +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -0,0 +1,368 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/exec/clickhouse_source_node.h" + +#include +#include + +#include + +#include "src/carnot/planpb/plan.pb.h" +#include "src/common/base/base.h" +#include "src/shared/types/arrow_adapter.h" +#include "src/shared/types/types.h" + +namespace px { +namespace carnot { +namespace exec { + +std::string ClickHouseSourceNode::DebugStringImpl() { + return absl::Substitute("Exec::ClickHouseSourceNode: ", + query_, output_descriptor_->DebugString()); +} + +Status ClickHouseSourceNode::InitImpl(const plan::Operator& plan_node) { + CHECK(plan_node.op_type() == planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); + const auto* source_plan_node = static_cast(&plan_node); + + // Copy the plan node to local object + plan_node_ = std::make_unique(*source_plan_node); + + // Extract connection parameters from plan node + host_ = plan_node_->host(); + port_ = plan_node_->port(); + username_ = plan_node_->username(); + password_ = plan_node_->password(); + database_ = plan_node_->database(); + query_ = plan_node_->query(); + batch_size_ = plan_node_->batch_size(); + streaming_ = plan_node_->streaming(); + + return Status::OK(); +} + +Status ClickHouseSourceNode::PrepareImpl(ExecState*) { + return Status::OK(); +} + +Status ClickHouseSourceNode::OpenImpl(ExecState*) { + // Create ClickHouse client + clickhouse::ClientOptions options; + options.SetHost(host_); + options.SetPort(port_); + options.SetUser(username_); + options.SetPassword(password_); + options.SetDefaultDatabase(database_); + + try { + 
client_ = std::make_unique(options); + } catch (const std::exception& e) { + return error::Internal("Failed to create ClickHouse client: $0", e.what()); + } + + return Status::OK(); +} + +Status ClickHouseSourceNode::CloseImpl(ExecState*) { + client_.reset(); + result_blocks_.clear(); + return Status::OK(); +} + +StatusOr ClickHouseSourceNode::ClickHouseTypeToPixieType( + const clickhouse::TypeRef& ch_type) { + const auto& type_name = ch_type->GetName(); + + // Integer types - Pixie only supports INT64 + if (type_name == "UInt8" || type_name == "UInt16" || type_name == "UInt32" || + type_name == "UInt64" || type_name == "Int8" || type_name == "Int16" || + type_name == "Int32" || type_name == "Int64") { + return types::DataType::INT64; + } + + // UInt128 + if (type_name == "UInt128") { + return types::DataType::UINT128; + } + + // Floating point types - Pixie only supports FLOAT64 + if (type_name == "Float32" || type_name == "Float64") { + return types::DataType::FLOAT64; + } + + // String types + if (type_name == "String" || type_name == "FixedString") { + return types::DataType::STRING; + } + + // Date/time types + if (type_name == "DateTime" || type_name == "DateTime64") { + return types::DataType::TIME64NS; + } + + // Boolean + if (type_name == "Bool") { + return types::DataType::BOOLEAN; + } + + return error::InvalidArgument("Unsupported ClickHouse type: $0", type_name); +} + +StatusOr> ClickHouseSourceNode::ConvertClickHouseBlockToRowBatch( + const clickhouse::Block& block, bool /*is_last_block*/) { + auto num_rows = block.GetRowCount(); + auto num_cols = block.GetColumnCount(); + + // Create output row descriptor if this is the first block + if (current_block_index_ == 0) { + std::vector col_types; + for (size_t i = 0; i < num_cols; ++i) { + PX_ASSIGN_OR_RETURN(auto pixie_type, + ClickHouseTypeToPixieType(block[i]->Type())); + col_types.push_back(pixie_type); + } + // Note: In a real implementation, we would get column names from the plan + // or from 
ClickHouse metadata + } + + auto row_batch = std::make_unique(*output_descriptor_, num_rows); + + // Convert each column + for (size_t col_idx = 0; col_idx < num_cols; ++col_idx) { + const auto& ch_column = block[col_idx]; + const auto& type_name = ch_column->Type()->GetName(); + + // For now, implement conversion for common types + // This is where column type inference happens + + // Integer types - all map to INT64 in Pixie + if (type_name == "UInt8") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "UInt16") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "UInt32") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "UInt64") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "Int8") { + auto typed_col = 
ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "Int16") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "Int32") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "Int64") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(typed_col->At(i)); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "String") { + auto typed_col = ch_column->As(); + arrow::StringBuilder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + + for (size_t i = 0; i < num_rows; ++i) { + // Convert string_view to string + std::string value(typed_col->At(i)); + PX_RETURN_IF_ERROR(builder.Append(value)); + } + + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + + } else if (type_name == "Float32") { + auto typed_col = ch_column->As(); + arrow::DoubleBuilder builder; + 
PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(static_cast(typed_col->At(i))); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "Float64") { + auto typed_col = ch_column->As(); + arrow::DoubleBuilder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(typed_col->At(i)); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "Bool") { + auto typed_col = ch_column->As(); + arrow::BooleanBuilder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + for (size_t i = 0; i < num_rows; ++i) { + builder.UnsafeAppend(typed_col->At(i) != 0); + } + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name == "DateTime") { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + + for (size_t i = 0; i < num_rows; ++i) { + // Convert DateTime (seconds since epoch) to nanoseconds + int64_t ns = static_cast(typed_col->At(i)) * 1000000000LL; + builder.UnsafeAppend(ns); + } + + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + + } else { + return error::InvalidArgument("Unsupported ClickHouse type for conversion: $0", type_name); + } + } + + // Set end-of-window and end-of-stream flags + // Don't set them here - they should be set in GenerateNextImpl + row_batch->set_eow(false); + row_batch->set_eos(false); + + return row_batch; +} + +Status ClickHouseSourceNode::ExecuteQuery() { + if (query_executed_) { + return Status::OK(); + } + + try { + client_->Select(query_, + [this](const clickhouse::Block& block) { + // Only store non-empty 
blocks + if (block.GetRowCount() > 0) { + result_blocks_.push_back(block); + } + } + ); + query_executed_ = true; + } catch (const std::exception& e) { + return error::Internal("Failed to execute ClickHouse query: $0", e.what()); + } + + return Status::OK(); +} + +Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { + // Execute query if not done yet + PX_RETURN_IF_ERROR(ExecuteQuery()); + + // Check if we have more blocks to process + if (current_block_index_ >= result_blocks_.size()) { + // Send empty batch with eos=true + PX_ASSIGN_OR_RETURN(auto empty_batch, RowBatch::WithZeroRows(*output_descriptor_, true, true)); + PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *empty_batch)); + return Status::OK(); + } + + // Convert current block to row batch + bool is_last_block = (current_block_index_ == result_blocks_.size() - 1); + PX_ASSIGN_OR_RETURN(auto row_batch, + ConvertClickHouseBlockToRowBatch(result_blocks_[current_block_index_], + is_last_block)); + + // Update stats + rows_processed_ += row_batch->num_rows(); + bytes_processed_ += row_batch->NumBytes(); + + // Send to children + PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *row_batch)); + + current_block_index_++; + + return Status::OK(); +} + +bool ClickHouseSourceNode::NextBatchReady() { + // For now, we execute the entire query at once + // In the future, we could support streaming + return HasBatchesRemaining(); +} + +} // namespace exec +} // namespace carnot +} // namespace px \ No newline at end of file diff --git a/src/carnot/exec/clickhouse_source_node.h b/src/carnot/exec/clickhouse_source_node.h new file mode 100644 index 00000000000..b509d654faf --- /dev/null +++ b/src/carnot/exec/clickhouse_source_node.h @@ -0,0 +1,96 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include +#include + +#include + +#include "src/carnot/exec/exec_node.h" +#include "src/carnot/exec/exec_state.h" +#include "src/carnot/plan/operators.h" +#include "src/common/base/base.h" +#include "src/common/base/status.h" +#include "src/shared/types/types.h" +#include "src/table_store/schema/row_batch.h" + +namespace px { +namespace carnot { +namespace exec { + +using table_store::schema::RowBatch; +using table_store::schema::RowDescriptor; + +class ClickHouseSourceNode : public SourceNode { + public: + ClickHouseSourceNode() = default; + virtual ~ClickHouseSourceNode() = default; + + bool NextBatchReady() override; + + protected: + std::string DebugStringImpl() override; + Status InitImpl(const plan::Operator& plan_node) override; + Status PrepareImpl(ExecState* exec_state) override; + Status OpenImpl(ExecState* exec_state) override; + Status CloseImpl(ExecState* exec_state) override; + Status GenerateNextImpl(ExecState* exec_state) override; + + private: + // Convert ClickHouse column types to Pixie data types + StatusOr ClickHouseTypeToPixieType(const clickhouse::TypeRef& ch_type); + + // Convert ClickHouse block to Pixie RowBatch + StatusOr> ConvertClickHouseBlockToRowBatch( + const clickhouse::Block& block, bool is_last_block); + + // Execute the query and fetch results + Status ExecuteQuery(); + + // Connection information + std::string host_; + int port_; + std::string username_; + std::string password_; + std::string database_; + std::string query_; + + // 
Batch size configuration + size_t batch_size_ = 1024; + + // ClickHouse client + std::unique_ptr client_; + + // Query results + std::vector result_blocks_; + size_t current_block_index_ = 0; + bool query_executed_ = false; + + // Streaming support (future enhancement) + bool streaming_ = false; + + // Plan node + std::unique_ptr plan_node_; +}; + +} // namespace exec +} // namespace carnot +} // namespace px \ No newline at end of file diff --git a/src/carnot/exec/clickhouse_source_node_test.cc b/src/carnot/exec/clickhouse_source_node_test.cc index c79d178f433..5eddd377c24 100644 --- a/src/carnot/exec/clickhouse_source_node_test.cc +++ b/src/carnot/exec/clickhouse_source_node_test.cc @@ -16,29 +16,41 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include -#include +#include "src/carnot/exec/clickhouse_source_node.h" + +#include #include -#include -#include +#include #include +#include +#include #include #include #include +#include #include +#include "src/carnot/exec/test_utils.h" +#include "src/carnot/planpb/plan.pb.h" +#include "src/carnot/planpb/test_proto.h" +#include "src/carnot/udf/registry.h" #include "src/common/testing/test_utils/container_runner.h" #include "src/common/testing/testing.h" -#include "src/common/base/logging.h" +#include "src/shared/types/arrow_adapter.h" +#include "src/shared/types/column_wrapper.h" +#include "src/shared/types/types.h" +#include "src/shared/types/typespb/types.pb.h" namespace px { namespace carnot { namespace exec { +using table_store::Table; +using table_store::schema::RowBatch; +using table_store::schema::RowDescriptor; using ::testing::_; -using ::testing::ElementsAre; class ClickHouseSourceNodeTest : public ::testing::Test { protected: @@ -48,10 +60,17 @@ class ClickHouseSourceNodeTest : public ::testing::Test { static constexpr int kClickHousePort = 9000; void SetUp() override { + // Set up function registry and exec state + func_registry_ = std::make_unique("test_registry"); + auto table_store = std::make_shared(); + 
exec_state_ = std::make_unique( + func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, + MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + + // Start ClickHouse container clickhouse_server_ = std::make_unique( px::testing::BazelRunfilePath(kClickHouseImage), "clickhouse_test", kClickHouseReadyMessage); - // Start ClickHouse server with necessary options std::vector options = { absl::Substitute("--publish=$0:$0", kClickHousePort), "--env=CLICKHOUSE_PASSWORD=test_password", @@ -59,17 +78,28 @@ class ClickHouseSourceNodeTest : public ::testing::Test { }; ASSERT_OK(clickhouse_server_->Run( - std::chrono::seconds{60}, // timeout + std::chrono::seconds{60}, options, - {}, // args - true, // use_host_pid_namespace - std::chrono::seconds{300} // container_lifetime + {}, + true, + std::chrono::seconds{300} )); - // Give ClickHouse more time to fully initialize + // Give ClickHouse time to initialize std::this_thread::sleep_for(std::chrono::seconds(5)); - // Create ClickHouse client with retry logic (using default auth) + // Create ClickHouse client for test data setup + SetupClickHouseClient(); + CreateTestTable(); + } + + void TearDown() override { + if (client_) { + client_.reset(); + } + } + + void SetupClickHouseClient() { clickhouse::ClientOptions client_options; client_options.SetHost("localhost"); client_options.SetPort(kClickHousePort); @@ -77,46 +107,29 @@ class ClickHouseSourceNodeTest : public ::testing::Test { client_options.SetPassword("test_password"); client_options.SetDefaultDatabase("default"); - // Retry connection a few times const int kMaxRetries = 5; for (int i = 0; i < kMaxRetries; ++i) { LOG(INFO) << "Attempting to connect to ClickHouse (attempt " << (i + 1) << "/" << kMaxRetries << ")..."; try { client_ = std::make_unique(client_options); - // Test the connection with a simple query client_->Execute("SELECT 1"); - break; // Connection successful + break; } catch (const std::exception& e) { - 
LOG(WARNING) << "Failed to connect to ClickHouse (attempt " << (i + 1) - << "/" << kMaxRetries << "): " << e.what(); + LOG(WARNING) << "Failed to connect: " << e.what(); if (i < kMaxRetries - 1) { std::this_thread::sleep_for(std::chrono::seconds(2)); } else { - throw; // Re-throw on last attempt + throw; } } } - - // Create test table - CreateTestTable(); - } - - void TearDown() override { - if (client_) { - client_.reset(); - } - if (clickhouse_server_) { - clickhouse_server_->Wait(); - } } void CreateTestTable() { try { - // Drop table if exists client_->Execute("DROP TABLE IF EXISTS test_table"); - // Create test table client_->Execute(R"( CREATE TABLE test_table ( id UInt64, @@ -127,13 +140,11 @@ class ClickHouseSourceNodeTest : public ::testing::Test { ORDER BY id )"); - // Insert test data auto id_col = std::make_shared(); auto name_col = std::make_shared(); auto value_col = std::make_shared(); auto timestamp_col = std::make_shared(); - // Add test data std::time_t now = std::time(nullptr); id_col->Append(1); name_col->Append("test1"); @@ -167,84 +178,200 @@ class ClickHouseSourceNodeTest : public ::testing::Test { std::unique_ptr clickhouse_server_; std::unique_ptr client_; + std::unique_ptr exec_state_; + std::unique_ptr func_registry_; }; TEST_F(ClickHouseSourceNodeTest, BasicQuery) { - // Test basic SELECT query - std::vector ids; - std::vector names; - std::vector values; - - client_->Select("SELECT id, name, value FROM test_table ORDER BY id", - [&](const clickhouse::Block& block) { - for (size_t i = 0; i < block.GetRowCount(); ++i) { - ids.push_back(block[0]->As()->At(i)); - names.emplace_back(block[1]->As()->At(i)); - values.push_back(block[2]->As()->At(i)); - } - } - ); + // Create ClickHouse source operator proto + auto op_proto = planpb::testutils::CreateClickHouseSourceOperatorPB(); + std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op_proto, 1); + + // Define expected output schema + RowDescriptor 
output_rd({types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); + + // Create node tester + auto tester = exec::ExecNodeTester( + *plan_node, output_rd, std::vector({}), exec_state_.get()); + + // Verify state machine behavior + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); + + // First call should return data + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 3, /*eow*/ false, /*eos*/ false) + .AddColumn({1, 2, 3}) + .AddColumn({"test1", "test2", "test3"}) + .AddColumn({10.5, 20.5, 30.5}) + .get()); + + // ClickHouse returns all data at once, so next call should return empty batch with eos + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) + .AddColumn({}) + .AddColumn({}) + .AddColumn({}) + .get()); - EXPECT_THAT(ids, ElementsAre(1, 2, 3)); - EXPECT_THAT(names, ElementsAre("test1", "test2", "test3")); - EXPECT_THAT(values, ElementsAre(10.5, 20.5, 30.5)); + EXPECT_FALSE(tester.node()->HasBatchesRemaining()); + tester.Close(); + + // Verify metrics + EXPECT_EQ(3, tester.node()->RowsProcessed()); + EXPECT_GT(tester.node()->BytesProcessed(), 0); } -TEST_F(ClickHouseSourceNodeTest, FilteredQuery) { - // Test SELECT with WHERE clause - std::vector ids; - std::vector names; - - client_->Select("SELECT id, name FROM test_table WHERE value > 15.0 ORDER BY id", - [&](const clickhouse::Block& block) { - for (size_t i = 0; i < block.GetRowCount(); ++i) { - ids.push_back(block[0]->As()->At(i)); - names.emplace_back(block[1]->As()->At(i)); - } - } - ); +TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { + // Create a table with no data + client_->Execute("DROP TABLE IF EXISTS empty_table"); + client_->Execute(R"( + CREATE TABLE empty_table ( + id UInt64, + name String, + value Float64 + ) ENGINE = MergeTree() + ORDER BY id + )"); + + // Create operator that queries empty table + planpb::Operator op; + 
op.set_op_type(planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); + auto* ch_op = op.mutable_clickhouse_source_op(); + ch_op->set_host("localhost"); + ch_op->set_port(kClickHousePort); + ch_op->set_username("default"); + ch_op->set_password("test_password"); + ch_op->set_database("default"); + ch_op->set_query("SELECT id, name, value FROM empty_table"); + ch_op->set_batch_size(1024); + ch_op->set_streaming(false); + ch_op->add_column_names("id"); + ch_op->add_column_names("name"); + ch_op->add_column_names("value"); + ch_op->add_column_types(types::DataType::INT64); + ch_op->add_column_types(types::DataType::STRING); + ch_op->add_column_types(types::DataType::FLOAT64); + + std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); + RowDescriptor output_rd({types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); + + auto tester = exec::ExecNodeTester( + *plan_node, output_rd, std::vector({}), exec_state_.get()); - EXPECT_THAT(ids, ElementsAre(2, 3)); - EXPECT_THAT(names, ElementsAre("test2", "test3")); + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); + + // Should return empty batch with eos=true + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) + .AddColumn({}) + .AddColumn({}) + .AddColumn({}) + .get()); + + EXPECT_FALSE(tester.node()->HasBatchesRemaining()); + tester.Close(); + + EXPECT_EQ(0, tester.node()->RowsProcessed()); + EXPECT_EQ(0, tester.node()->BytesProcessed()); } -TEST_F(ClickHouseSourceNodeTest, AggregateQuery) { - // Test aggregate functions - double sum_value = 0; - uint64_t count = 0; - bool query_executed = false; - - try { - client_->Select("SELECT SUM(value), COUNT(*) FROM test_table", - [&](const clickhouse::Block& block) { - if (block.GetRowCount() > 0) { - sum_value = block[0]->As()->At(0); - count = block[1]->As()->At(0); - query_executed = true; - } - } - ); - } catch (const std::exception& e) { - LOG(ERROR) << "Aggregate query failed: " << 
e.what(); - FAIL() << "Aggregate query failed: " << e.what(); - } +TEST_F(ClickHouseSourceNodeTest, FilteredQuery) { + // Create operator with WHERE clause + planpb::Operator op; + op.set_op_type(planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); + auto* ch_op = op.mutable_clickhouse_source_op(); + ch_op->set_host("localhost"); + ch_op->set_port(kClickHousePort); + ch_op->set_username("default"); + ch_op->set_password("test_password"); + ch_op->set_database("default"); + ch_op->set_query("SELECT id, name, value FROM test_table WHERE value > 15.0 ORDER BY id"); + ch_op->set_batch_size(1024); + ch_op->set_streaming(false); + ch_op->add_column_names("id"); + ch_op->add_column_names("name"); + ch_op->add_column_names("value"); + ch_op->add_column_types(types::DataType::INT64); + ch_op->add_column_types(types::DataType::STRING); + ch_op->add_column_types(types::DataType::FLOAT64); + + std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); + RowDescriptor output_rd({types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); + + auto tester = exec::ExecNodeTester( + *plan_node, output_rd, std::vector({}), exec_state_.get()); + + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); - EXPECT_TRUE(query_executed) << "Query callback was not executed"; - EXPECT_DOUBLE_EQ(sum_value, 61.5); // 10.5 + 20.5 + 30.5 - EXPECT_EQ(count, 3); + // Should return filtered results + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 2, /*eow*/ false, /*eos*/ false) + .AddColumn({2, 3}) + .AddColumn({"test2", "test3"}) + .AddColumn({20.5, 30.5}) + .get()); + + // Next call should return empty batch with eos + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) + .AddColumn({}) + .AddColumn({}) + .AddColumn({}) + .get()); + + EXPECT_FALSE(tester.node()->HasBatchesRemaining()); + tester.Close(); + + EXPECT_EQ(2, tester.node()->RowsProcessed()); + 
EXPECT_GT(tester.node()->BytesProcessed(), 0); } -TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { - // Test query that returns no rows - int row_count = 0; +TEST_F(ClickHouseSourceNodeTest, AggregateQuery) { + // Create operator with aggregate query + planpb::Operator op; + op.set_op_type(planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); + auto* ch_op = op.mutable_clickhouse_source_op(); + ch_op->set_host("localhost"); + ch_op->set_port(kClickHousePort); + ch_op->set_username("default"); + ch_op->set_password("test_password"); + ch_op->set_database("default"); + ch_op->set_query("SELECT SUM(value) as sum_value, COUNT(*) as count FROM test_table"); + ch_op->set_batch_size(1024); + ch_op->set_streaming(false); + ch_op->add_column_names("sum_value"); + ch_op->add_column_names("count"); + ch_op->add_column_types(types::DataType::FLOAT64); + ch_op->add_column_types(types::DataType::INT64); - client_->Select("SELECT * FROM test_table WHERE id > 1000", - [&](const clickhouse::Block& block) { - row_count += block.GetRowCount(); - } - ); + std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); + RowDescriptor output_rd({types::DataType::FLOAT64, types::DataType::INT64}); + + auto tester = exec::ExecNodeTester( + *plan_node, output_rd, std::vector({}), exec_state_.get()); + + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); + + // Should return aggregate result + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 1, /*eow*/ false, /*eos*/ false) + .AddColumn({61.5}) // 10.5 + 20.5 + 30.5 + .AddColumn({3}) + .get()); + + // Next call should return empty batch with eos + tester.GenerateNextResult().ExpectRowBatch( + RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) + .AddColumn({}) + .AddColumn({}) + .get()); + + EXPECT_FALSE(tester.node()->HasBatchesRemaining()); + tester.Close(); - EXPECT_EQ(row_count, 0); + EXPECT_EQ(1, tester.node()->RowsProcessed()); + EXPECT_GT(tester.node()->BytesProcessed(), 0); } } // 
namespace exec diff --git a/src/carnot/plan/operators.cc b/src/carnot/plan/operators.cc index bfdb43427f4..bf998e4b2a4 100644 --- a/src/carnot/plan/operators.cc +++ b/src/carnot/plan/operators.cc @@ -83,6 +83,8 @@ std::unique_ptr Operator::FromProto(const planpb::Operator& pb, int64_ return CreateOperator(id, pb.udtf_source_op()); case planpb::EMPTY_SOURCE_OPERATOR: return CreateOperator(id, pb.empty_source_op()); + case planpb::CLICKHOUSE_SOURCE_OPERATOR: + return CreateOperator(id, pb.clickhouse_source_op()); case planpb::OTEL_EXPORT_SINK_OPERATOR: return CreateOperator(id, pb.otel_sink_op()); default: @@ -709,6 +711,34 @@ StatusOr EmptySourceOperator::OutputRelation( return r; } +/** + * ClickHouseSourceOperator implementation. + */ + +std::string ClickHouseSourceOperator::DebugString() const { + return absl::Substitute("Op:ClickHouseSource(query=$0)", pb_.query()); +} + +Status ClickHouseSourceOperator::Init(const planpb::ClickHouseSourceOperator& pb) { + pb_ = pb; + is_initialized_ = true; + return Status::OK(); +} + +StatusOr ClickHouseSourceOperator::OutputRelation( + const table_store::schema::Schema&, const PlanState&, + const std::vector& input_ids) const { + DCHECK(is_initialized_) << "Not initialized"; + if (!input_ids.empty()) { + return error::InvalidArgument("Source operator cannot have any inputs"); + } + table_store::schema::Relation r; + for (int i = 0; i < pb_.column_types_size(); ++i) { + r.AddColumn(static_cast(pb_.column_types(i)), pb_.column_names(i)); + } + return r; +} + /** * OTel Export Sink Operator Implementation. 
*/ diff --git a/src/carnot/plan/operators.h b/src/carnot/plan/operators.h index 8586f6eb976..143ecf53674 100644 --- a/src/carnot/plan/operators.h +++ b/src/carnot/plan/operators.h @@ -359,6 +359,41 @@ class EmptySourceOperator : public Operator { std::vector column_idxs_; }; +class ClickHouseSourceOperator : public Operator { + public: + explicit ClickHouseSourceOperator(int64_t id) : Operator(id, planpb::CLICKHOUSE_SOURCE_OPERATOR) {} + ~ClickHouseSourceOperator() override = default; + + StatusOr OutputRelation( + const table_store::schema::Schema& schema, const PlanState& state, + const std::vector& input_ids) const override; + Status Init(const planpb::ClickHouseSourceOperator& pb); + std::string DebugString() const override; + + std::string host() const { return pb_.host(); } + int32_t port() const { return pb_.port(); } + std::string username() const { return pb_.username(); } + std::string password() const { return pb_.password(); } + std::string database() const { return pb_.database(); } + std::string query() const { return pb_.query(); } + int32_t batch_size() const { return pb_.batch_size(); } + bool streaming() const { return pb_.streaming(); } + std::vector column_names() const { + return std::vector(pb_.column_names().begin(), pb_.column_names().end()); + } + std::vector column_types() const { + std::vector types; + types.reserve(pb_.column_types_size()); + for (const auto& type : pb_.column_types()) { + types.push_back(static_cast(type)); + } + return types; + } + + private: + planpb::ClickHouseSourceOperator pb_; +}; + class OTelExportSinkOperator : public Operator { public: explicit OTelExportSinkOperator(int64_t id) : Operator(id, planpb::OTEL_EXPORT_SINK_OPERATOR) {} diff --git a/src/carnot/planpb/plan.proto b/src/carnot/planpb/plan.proto index c7bcb552dda..a906e54c3bf 100644 --- a/src/carnot/planpb/plan.proto +++ b/src/carnot/planpb/plan.proto @@ -104,6 +104,7 @@ enum OperatorType { GRPC_SOURCE_OPERATOR = 1100; UDTF_SOURCE_OPERATOR = 1200; 
EMPTY_SOURCE_OPERATOR = 1300; + CLICKHOUSE_SOURCE_OPERATOR = 1400; // Regular operators are range 2000 - 10000. MAP_OPERATOR = 2000; AGGREGATE_OPERATOR = 2100; @@ -149,6 +150,8 @@ message Operator { EmptySourceOperator empty_source_op = 13; // OTelExportSinkOperator writes the input table to an OpenTelemetry endpoint. OTelExportSinkOperator otel_sink_op = 14 [ (gogoproto.customname) = "OTelSinkOp" ]; + // ClickHouseSourceOperator reads data from a ClickHouse database. + ClickHouseSourceOperator clickhouse_source_op = 15; } } @@ -358,6 +361,30 @@ message EmptySourceOperator { repeated px.types.DataType column_types = 2; } +// Source operator that queries a ClickHouse database. +message ClickHouseSourceOperator { + // Connection parameters + string host = 1; + int32 port = 2; + string username = 3; + string password = 4; + string database = 5; + + // Query to execute + string query = 6; + + // The names for the columns (can be auto-detected from query) + repeated string column_names = 7; + // The types of the columns (can be auto-detected from query) + repeated px.types.DataType column_types = 8; + + // Batch size for fetching results + int32 batch_size = 9; + + // Whether to stream results (future enhancement) + bool streaming = 10; +} + // OTelLog maps operator columns to each field in the OpenTelemetry Log configuration. // The mapping ensures that each row of the table will be a separate log. 
// Maps to the config described here: diff --git a/src/carnot/planpb/test_proto.h b/src/carnot/planpb/test_proto.h index 0ca5a1c37a4..1228e45b672 100644 --- a/src/carnot/planpb/test_proto.h +++ b/src/carnot/planpb/test_proto.h @@ -195,6 +195,23 @@ column_names: "usage" streaming: false )"; +constexpr char kClickHouseSourceOperator[] = R"( +host: "localhost" +port: 9000 +username: "default" +password: "test_password" +database: "default" +query: "SELECT id, name, value FROM test_table ORDER BY id" +batch_size: 1024 +streaming: false +column_names: "id" +column_names: "name" +column_names: "value" +column_types: INT64 +column_types: STRING +column_types: FLOAT64 +)"; + constexpr char kBlockingAggOperator1[] = R"( windowed: false values { @@ -1328,6 +1345,14 @@ planpb::Operator CreateTestSource1PB(const std::string& table_name = "cpu") { return op; } +planpb::Operator CreateClickHouseSourceOperatorPB() { + planpb::Operator op; + auto op_proto = absl::Substitute(kOperatorProtoTmpl, "CLICKHOUSE_SOURCE_OPERATOR", + "clickhouse_source_op", kClickHouseSourceOperator); + CHECK(google::protobuf::TextFormat::MergeFromString(op_proto, &op)) << "Failed to parse proto"; + return op; +} + planpb::Operator CreateTestStreamingSource1PB(const std::string& table_name = "cpu") { planpb::Operator op; auto mem_proto = absl::Substitute(kStreamingMemSourceOperator1, table_name); @@ -1378,6 +1403,32 @@ planpb::Operator CreateTestSink1PB() { return op; } +// Create a test ClickHouse source operator with hardcoded values +planpb::Operator CreateTestClickHouseSourcePB() { + constexpr char kClickHouseSourceOperator[] = R"( + host: "localhost" + port: 9000 + username: "default" + password: "test_password" + database: "default" + query: "SELECT id, name, value FROM test_table ORDER BY id" + batch_size: 1024 + streaming: false + column_names: "id" + column_names: "name" + column_names: "value" + column_types: UINT64 + column_types: STRING + column_types: FLOAT64 + )"; + + planpb::Operator op; + 
auto op_proto = absl::Substitute(kOperatorProtoTmpl, "CLICKHOUSE_SOURCE_OPERATOR", + "clickhouse_source_op", kClickHouseSourceOperator); + CHECK(google::protobuf::TextFormat::MergeFromString(op_proto, &op)) << "Failed to parse proto"; + return op; +} + planpb::Operator CreateTestSink2PB() { planpb::Operator op; auto op_proto = absl::Substitute(kOperatorProtoTmpl, "MEMORY_SINK_OPERATOR", "mem_sink_op", From 6d6fe708c58b78a0acb3c206067f191f7568d1ab Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 7 Sep 2025 06:00:05 +0000 Subject: [PATCH 60/86] Add time based filtering and use batch size from plan node Signed-off-by: Dom Del Nano --- src/carnot/exec/clickhouse_source_node.cc | 235 ++++++++++++------ src/carnot/exec/clickhouse_source_node.h | 46 ++-- .../exec/clickhouse_source_node_test.cc | 211 ++++++---------- src/carnot/planpb/test_proto.h | 10 +- 4 files changed, 275 insertions(+), 227 deletions(-) diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index 675435943a1..01a70627e94 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -20,7 +20,10 @@ #include #include +#include +#include +#include #include #include "src/carnot/planpb/plan.pb.h" @@ -33,33 +36,40 @@ namespace carnot { namespace exec { std::string ClickHouseSourceNode::DebugStringImpl() { - return absl::Substitute("Exec::ClickHouseSourceNode: ", - query_, output_descriptor_->DebugString()); + return absl::Substitute("Exec::ClickHouseSourceNode: ", base_query_, + output_descriptor_->DebugString()); } Status ClickHouseSourceNode::InitImpl(const plan::Operator& plan_node) { CHECK(plan_node.op_type() == planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); const auto* source_plan_node = static_cast(&plan_node); - + // Copy the plan node to local object plan_node_ = std::make_unique(*source_plan_node); - + // Extract connection parameters from plan node host_ = plan_node_->host(); port_ = 
plan_node_->port(); username_ = plan_node_->username(); password_ = plan_node_->password(); database_ = plan_node_->database(); - query_ = plan_node_->query(); + base_query_ = plan_node_->query(); batch_size_ = plan_node_->batch_size(); streaming_ = plan_node_->streaming(); - + + // Initialize cursor state + current_offset_ = 0; + has_more_data_ = true; + current_block_index_ = 0; + + // TODO(ddelnano): Extract time column and start/stop times from the plan node + // For now, use timestamp column for time filtering + time_column_ = "timestamp"; + return Status::OK(); } -Status ClickHouseSourceNode::PrepareImpl(ExecState*) { - return Status::OK(); -} +Status ClickHouseSourceNode::PrepareImpl(ExecState*) { return Status::OK(); } Status ClickHouseSourceNode::OpenImpl(ExecState*) { // Create ClickHouse client @@ -69,58 +79,64 @@ Status ClickHouseSourceNode::OpenImpl(ExecState*) { options.SetUser(username_); options.SetPassword(password_); options.SetDefaultDatabase(database_); - + try { client_ = std::make_unique(options); } catch (const std::exception& e) { return error::Internal("Failed to create ClickHouse client: $0", e.what()); } - + return Status::OK(); } Status ClickHouseSourceNode::CloseImpl(ExecState*) { client_.reset(); - result_blocks_.clear(); + current_batch_blocks_.clear(); + + // Reset cursor state + current_offset_ = 0; + current_block_index_ = 0; + has_more_data_ = true; + return Status::OK(); } StatusOr ClickHouseSourceNode::ClickHouseTypeToPixieType( const clickhouse::TypeRef& ch_type) { const auto& type_name = ch_type->GetName(); - + // Integer types - Pixie only supports INT64 - if (type_name == "UInt8" || type_name == "UInt16" || type_name == "UInt32" || - type_name == "UInt64" || type_name == "Int8" || type_name == "Int16" || + if (type_name == "UInt8" || type_name == "UInt16" || type_name == "UInt32" || + type_name == "UInt64" || type_name == "Int8" || type_name == "Int16" || type_name == "Int32" || type_name == "Int64") { return 
types::DataType::INT64; } - + // UInt128 if (type_name == "UInt128") { return types::DataType::UINT128; } - + // Floating point types - Pixie only supports FLOAT64 if (type_name == "Float32" || type_name == "Float64") { return types::DataType::FLOAT64; } - + // String types if (type_name == "String" || type_name == "FixedString") { return types::DataType::STRING; } - + // Date/time types if (type_name == "DateTime" || type_name == "DateTime64") { return types::DataType::TIME64NS; } - + // Boolean if (type_name == "Bool") { return types::DataType::BOOLEAN; } - + return error::InvalidArgument("Unsupported ClickHouse type: $0", type_name); } @@ -128,29 +144,28 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock const clickhouse::Block& block, bool /*is_last_block*/) { auto num_rows = block.GetRowCount(); auto num_cols = block.GetColumnCount(); - + // Create output row descriptor if this is the first block if (current_block_index_ == 0) { std::vector col_types; for (size_t i = 0; i < num_cols; ++i) { - PX_ASSIGN_OR_RETURN(auto pixie_type, - ClickHouseTypeToPixieType(block[i]->Type())); + PX_ASSIGN_OR_RETURN(auto pixie_type, ClickHouseTypeToPixieType(block[i]->Type())); col_types.push_back(pixie_type); } // Note: In a real implementation, we would get column names from the plan // or from ClickHouse metadata } - + auto row_batch = std::make_unique(*output_descriptor_, num_rows); - + // Convert each column for (size_t col_idx = 0; col_idx < num_cols; ++col_idx) { const auto& ch_column = block[col_idx]; const auto& type_name = ch_column->Type()->GetName(); - + // For now, implement conversion for common types // This is where column type inference happens - + // Integer types - all map to INT64 in Pixie if (type_name == "UInt8") { auto typed_col = ch_column->As(); @@ -236,17 +251,17 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock auto typed_col = ch_column->As(); arrow::StringBuilder builder; PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); - + for (size_t i 
= 0; i < num_rows; ++i) { // Convert string_view to string std::string value(typed_col->At(i)); PX_RETURN_IF_ERROR(builder.Append(value)); } - + std::shared_ptr array; PX_RETURN_IF_ERROR(builder.Finish(&array)); PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); - + } else if (type_name == "Float32") { auto typed_col = ch_column->As(); arrow::DoubleBuilder builder; @@ -281,88 +296,164 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock auto typed_col = ch_column->As(); arrow::Int64Builder builder; PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); - + for (size_t i = 0; i < num_rows; ++i) { // Convert DateTime (seconds since epoch) to nanoseconds int64_t ns = static_cast(typed_col->At(i)) * 1000000000LL; builder.UnsafeAppend(ns); } - + std::shared_ptr array; PX_RETURN_IF_ERROR(builder.Finish(&array)); PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); - + } else { return error::InvalidArgument("Unsupported ClickHouse type for conversion: $0", type_name); } } - + // Set end-of-window and end-of-stream flags // Don't set them here - they should be set in GenerateNextImpl row_batch->set_eow(false); row_batch->set_eos(false); - + return row_batch; } -Status ClickHouseSourceNode::ExecuteQuery() { - if (query_executed_) { +std::string ClickHouseSourceNode::BuildQuery() { + std::string query = base_query_; + std::string where_clause; + std::vector conditions; + + // Add time filtering if start/stop times are specified and time column is set + if (!time_column_.empty()) { + if (start_time_.has_value()) { + conditions.push_back(absl::Substitute("$0 >= $1", time_column_, start_time_.value())); + } + if (stop_time_.has_value()) { + conditions.push_back(absl::Substitute("$0 <= $1", time_column_, stop_time_.value())); + } + } + + // Check if the base query already has a WHERE clause + std::string lower_query = query; + std::transform(lower_query.begin(), lower_query.end(), lower_query.begin(), ::tolower); + bool has_where = lower_query.find(" where ") != std::string::npos; + + if 
(!conditions.empty()) { + if (has_where) { + where_clause = " AND " + absl::StrJoin(conditions, " AND "); + } else { + where_clause = " WHERE " + absl::StrJoin(conditions, " AND "); + } + query += where_clause; + } + + // Add ORDER BY clause (needed for consistent pagination) + // If no ORDER BY exists, add one - prefer time column if available, otherwise use first column + if (lower_query.find(" order by ") == std::string::npos) { + if (!time_column_.empty()) { + query += absl::Substitute(" ORDER BY $0", time_column_); + } else { + // Fall back to ordering by first column for consistent pagination + query += " ORDER BY 1"; + } + } + + // Add LIMIT and OFFSET for pagination + query += absl::Substitute(" LIMIT $0 OFFSET $1", batch_size_, current_offset_); + + return query; +} + +Status ClickHouseSourceNode::ExecuteBatchQuery() { + // Clear previous batch results + current_batch_blocks_.clear(); + current_block_index_ = 0; + + if (!has_more_data_) { return Status::OK(); } - + + std::string query = BuildQuery(); + try { - client_->Select(query_, - [this](const clickhouse::Block& block) { - // Only store non-empty blocks - if (block.GetRowCount() > 0) { - result_blocks_.push_back(block); - } + size_t rows_received = 0; + client_->Select(query, [this, &rows_received](const clickhouse::Block& block) { + // Only store non-empty blocks + if (block.GetRowCount() > 0) { + current_batch_blocks_.push_back(block); + rows_received += block.GetRowCount(); } - ); - query_executed_ = true; + }); + + // Update cursor state + current_offset_ += rows_received; + if (rows_received < batch_size_) { + // We got fewer rows than requested, so no more data available + has_more_data_ = false; + } } catch (const std::exception& e) { - return error::Internal("Failed to execute ClickHouse query: $0", e.what()); + return error::Internal("Failed to execute ClickHouse batch query: $0", e.what()); } - + return Status::OK(); } Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { - 
// Execute query if not done yet - PX_RETURN_IF_ERROR(ExecuteQuery()); - - // Check if we have more blocks to process - if (current_block_index_ >= result_blocks_.size()) { - // Send empty batch with eos=true - PX_ASSIGN_OR_RETURN(auto empty_batch, RowBatch::WithZeroRows(*output_descriptor_, true, true)); - PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *empty_batch)); - return Status::OK(); + // If we've processed all blocks in current batch, fetch the next batch + if (current_block_index_ >= current_batch_blocks_.size()) { + if (!has_more_data_) { + // No more data available - send empty batch with eos=true + PX_ASSIGN_OR_RETURN(auto empty_batch, + RowBatch::WithZeroRows(*output_descriptor_, true, true)); + PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *empty_batch)); + return Status::OK(); + } + + // Fetch next batch from ClickHouse + PX_RETURN_IF_ERROR(ExecuteBatchQuery()); + + // If still no blocks after fetching, we're done + if (current_batch_blocks_.empty()) { + PX_ASSIGN_OR_RETURN(auto empty_batch, + RowBatch::WithZeroRows(*output_descriptor_, true, true)); + PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *empty_batch)); + return Status::OK(); + } } - - // Convert current block to row batch - bool is_last_block = (current_block_index_ == result_blocks_.size() - 1); - PX_ASSIGN_OR_RETURN(auto row_batch, - ConvertClickHouseBlockToRowBatch(result_blocks_[current_block_index_], - is_last_block)); - + + // Process current block + const auto& current_block = current_batch_blocks_[current_block_index_]; + bool is_last_block = + (current_block_index_ == current_batch_blocks_.size() - 1) && !has_more_data_; + + PX_ASSIGN_OR_RETURN(auto row_batch, + ConvertClickHouseBlockToRowBatch(current_block, is_last_block)); + + // Set proper end-of-window and end-of-stream flags + if (is_last_block) { + row_batch->set_eow(true); + row_batch->set_eos(true); + } + // Update stats rows_processed_ += row_batch->num_rows(); bytes_processed_ += 
row_batch->NumBytes(); - + // Send to children PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *row_batch)); - + current_block_index_++; - + return Status::OK(); } bool ClickHouseSourceNode::NextBatchReady() { - // For now, we execute the entire query at once - // In the future, we could support streaming - return HasBatchesRemaining(); + // We're ready if we have blocks in current batch or if we can fetch more data + return (current_block_index_ < current_batch_blocks_.size()) || has_more_data_; } } // namespace exec } // namespace carnot -} // namespace px \ No newline at end of file +} // namespace px diff --git a/src/carnot/exec/clickhouse_source_node.h b/src/carnot/exec/clickhouse_source_node.h index b509d654faf..3a71afe6e2e 100644 --- a/src/carnot/exec/clickhouse_source_node.h +++ b/src/carnot/exec/clickhouse_source_node.h @@ -18,12 +18,13 @@ #pragma once +#include + #include +#include #include #include -#include - #include "src/carnot/exec/exec_node.h" #include "src/carnot/exec/exec_state.h" #include "src/carnot/plan/operators.h" @@ -57,13 +58,16 @@ class ClickHouseSourceNode : public SourceNode { private: // Convert ClickHouse column types to Pixie data types StatusOr ClickHouseTypeToPixieType(const clickhouse::TypeRef& ch_type); - + // Convert ClickHouse block to Pixie RowBatch StatusOr> ConvertClickHouseBlockToRowBatch( const clickhouse::Block& block, bool is_last_block); - - // Execute the query and fetch results - Status ExecuteQuery(); + + // Execute a batch query + Status ExecuteBatchQuery(); + + // Build the query with time filtering and pagination + std::string BuildQuery(); // Connection information std::string host_; @@ -71,26 +75,32 @@ class ClickHouseSourceNode : public SourceNode { std::string username_; std::string password_; std::string database_; - std::string query_; - - // Batch size configuration + std::string base_query_; + + // Batch size and cursor tracking size_t batch_size_ = 1024; - + size_t current_offset_ = 0; + bool 
has_more_data_ = true; + + // Time filtering + std::optional start_time_; + std::optional stop_time_; + std::string time_column_; // Column to use for time filtering + // ClickHouse client std::unique_ptr client_; - - // Query results - std::vector result_blocks_; + + // Current batch results + std::vector current_batch_blocks_; size_t current_block_index_ = 0; - bool query_executed_ = false; - - // Streaming support (future enhancement) + + // Streaming support bool streaming_ = false; - + // Plan node std::unique_ptr plan_node_; }; } // namespace exec } // namespace carnot -} // namespace px \ No newline at end of file +} // namespace px diff --git a/src/carnot/exec/clickhouse_source_node_test.cc b/src/carnot/exec/clickhouse_source_node_test.cc index 5eddd377c24..fb6e98ee18a 100644 --- a/src/carnot/exec/clickhouse_source_node_test.cc +++ b/src/carnot/exec/clickhouse_source_node_test.cc @@ -18,20 +18,20 @@ #include "src/carnot/exec/clickhouse_source_node.h" +#include + #include +#include #include +#include #include #include -#include -#include #include #include #include #include -#include - #include "src/carnot/exec/test_utils.h" #include "src/carnot/planpb/plan.pb.h" #include "src/carnot/planpb/test_proto.h" @@ -54,11 +54,11 @@ using ::testing::_; class ClickHouseSourceNodeTest : public ::testing::Test { protected: - static constexpr char kClickHouseImage[] = + static constexpr char kClickHouseImage[] = "src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse.tar"; static constexpr char kClickHouseReadyMessage[] = "Ready for connections"; static constexpr int kClickHousePort = 9000; - + void SetUp() override { // Set up function registry and exec state func_registry_ = std::make_unique("test_registry"); @@ -66,39 +66,35 @@ class ClickHouseSourceNodeTest : public ::testing::Test { exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, 
MockLogStubGenerator, sole::uuid4(), nullptr); - + // Start ClickHouse container - clickhouse_server_ = std::make_unique( - px::testing::BazelRunfilePath(kClickHouseImage), "clickhouse_test", kClickHouseReadyMessage); - + clickhouse_server_ = + std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), + "clickhouse_test", kClickHouseReadyMessage); + std::vector options = { absl::Substitute("--publish=$0:$0", kClickHousePort), "--env=CLICKHOUSE_PASSWORD=test_password", "--network=host", }; - - ASSERT_OK(clickhouse_server_->Run( - std::chrono::seconds{60}, - options, - {}, - true, - std::chrono::seconds{300} - )); - + + ASSERT_OK(clickhouse_server_->Run(std::chrono::seconds{60}, options, {}, true, + std::chrono::seconds{300})); + // Give ClickHouse time to initialize std::this_thread::sleep_for(std::chrono::seconds(5)); - + // Create ClickHouse client for test data setup SetupClickHouseClient(); CreateTestTable(); } - + void TearDown() override { if (client_) { client_.reset(); } } - + void SetupClickHouseClient() { clickhouse::ClientOptions client_options; client_options.SetHost("localhost"); @@ -106,11 +102,11 @@ class ClickHouseSourceNodeTest : public ::testing::Test { client_options.SetUser("default"); client_options.SetPassword("test_password"); client_options.SetDefaultDatabase("default"); - + const int kMaxRetries = 5; for (int i = 0; i < kMaxRetries; ++i) { - LOG(INFO) << "Attempting to connect to ClickHouse (attempt " << (i + 1) - << "/" << kMaxRetries << ")..."; + LOG(INFO) << "Attempting to connect to ClickHouse (attempt " << (i + 1) << "/" << kMaxRetries + << ")..."; try { client_ = std::make_unique(client_options); client_->Execute("SELECT 1"); @@ -125,11 +121,11 @@ class ClickHouseSourceNodeTest : public ::testing::Test { } } } - + void CreateTestTable() { try { client_->Execute("DROP TABLE IF EXISTS test_table"); - + client_->Execute(R"( CREATE TABLE test_table ( id UInt64, @@ -137,38 +133,39 @@ class ClickHouseSourceNodeTest : public 
::testing::Test { value Float64, timestamp DateTime ) ENGINE = MergeTree() - ORDER BY id + ORDER BY timestamp )"); - + auto id_col = std::make_shared(); auto name_col = std::make_shared(); auto value_col = std::make_shared(); auto timestamp_col = std::make_shared(); - - std::time_t now = std::time(nullptr); + + // Add test data with increasing timestamps + std::time_t base_time = std::time(nullptr) - 3600; // Start 1 hour ago id_col->Append(1); name_col->Append("test1"); value_col->Append(10.5); - timestamp_col->Append(now); - + timestamp_col->Append(base_time); + id_col->Append(2); name_col->Append("test2"); value_col->Append(20.5); - timestamp_col->Append(now); - + timestamp_col->Append(base_time + 1800); // 30 minutes later + id_col->Append(3); name_col->Append("test3"); value_col->Append(30.5); - timestamp_col->Append(now); - + timestamp_col->Append(base_time + 3600); // 1 hour later + clickhouse::Block block; block.AppendColumn("id", id_col); block.AppendColumn("name", name_col); block.AppendColumn("value", value_col); block.AppendColumn("timestamp", timestamp_col); - + client_->Insert("test_table", block); - + LOG(INFO) << "Test table created and populated successfully"; } catch (const std::exception& e) { LOG(ERROR) << "Failed to create test table: " << e.what(); @@ -185,38 +182,40 @@ class ClickHouseSourceNodeTest : public ::testing::Test { TEST_F(ClickHouseSourceNodeTest, BasicQuery) { // Create ClickHouse source operator proto auto op_proto = planpb::testutils::CreateClickHouseSourceOperatorPB(); - std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op_proto, 1); - + std::unique_ptr plan_node = + plan::ClickHouseSourceOperator::FromProto(op_proto, 1); + // Define expected output schema - RowDescriptor output_rd({types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); - + RowDescriptor output_rd( + {types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); + // Create node tester auto tester = 
exec::ExecNodeTester( *plan_node, output_rd, std::vector({}), exec_state_.get()); - + // Verify state machine behavior EXPECT_TRUE(tester.node()->HasBatchesRemaining()); - - // First call should return data + + // First batch should return 2 rows (batch_size = 2) tester.GenerateNextResult().ExpectRowBatch( - RowBatchBuilder(output_rd, 3, /*eow*/ false, /*eos*/ false) - .AddColumn({1, 2, 3}) - .AddColumn({"test1", "test2", "test3"}) - .AddColumn({10.5, 20.5, 30.5}) + RowBatchBuilder(output_rd, 2, /*eow*/ false, /*eos*/ false) + .AddColumn({1, 2}) + .AddColumn({"test1", "test2"}) + .AddColumn({10.5, 20.5}) .get()); - - // ClickHouse returns all data at once, so next call should return empty batch with eos + + // Second batch should return remaining 1 row with eos EXPECT_TRUE(tester.node()->HasBatchesRemaining()); tester.GenerateNextResult().ExpectRowBatch( - RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) - .AddColumn({}) - .AddColumn({}) - .AddColumn({}) + RowBatchBuilder(output_rd, 1, /*eow*/ true, /*eos*/ true) + .AddColumn({3}) + .AddColumn({"test3"}) + .AddColumn({30.5}) .get()); - + EXPECT_FALSE(tester.node()->HasBatchesRemaining()); tester.Close(); - + // Verify metrics EXPECT_EQ(3, tester.node()->RowsProcessed()); EXPECT_GT(tester.node()->BytesProcessed(), 0); @@ -229,11 +228,12 @@ TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { CREATE TABLE empty_table ( id UInt64, name String, - value Float64 + value Float64, + timestamp DateTime ) ENGINE = MergeTree() - ORDER BY id + ORDER BY timestamp )"); - + // Create operator that queries empty table planpb::Operator op; op.set_op_type(planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); @@ -252,15 +252,16 @@ TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { ch_op->add_column_types(types::DataType::INT64); ch_op->add_column_types(types::DataType::STRING); ch_op->add_column_types(types::DataType::FLOAT64); - + std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); - RowDescriptor 
output_rd({types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); - + RowDescriptor output_rd( + {types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); + auto tester = exec::ExecNodeTester( *plan_node, output_rd, std::vector({}), exec_state_.get()); - + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); - + // Should return empty batch with eos=true tester.GenerateNextResult().ExpectRowBatch( RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) @@ -268,10 +269,10 @@ TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { .AddColumn({}) .AddColumn({}) .get()); - + EXPECT_FALSE(tester.node()->HasBatchesRemaining()); tester.Close(); - + EXPECT_EQ(0, tester.node()->RowsProcessed()); EXPECT_EQ(0, tester.node()->BytesProcessed()); } @@ -295,82 +296,28 @@ TEST_F(ClickHouseSourceNodeTest, FilteredQuery) { ch_op->add_column_types(types::DataType::INT64); ch_op->add_column_types(types::DataType::STRING); ch_op->add_column_types(types::DataType::FLOAT64); - + std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); - RowDescriptor output_rd({types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); - + RowDescriptor output_rd( + {types::DataType::INT64, types::DataType::STRING, types::DataType::FLOAT64}); + auto tester = exec::ExecNodeTester( *plan_node, output_rd, std::vector({}), exec_state_.get()); - + EXPECT_TRUE(tester.node()->HasBatchesRemaining()); - - // Should return filtered results + + // Should return all filtered results in one batch (2 rows < batch_size) tester.GenerateNextResult().ExpectRowBatch( - RowBatchBuilder(output_rd, 2, /*eow*/ false, /*eos*/ false) + RowBatchBuilder(output_rd, 2, /*eow*/ true, /*eos*/ true) .AddColumn({2, 3}) .AddColumn({"test2", "test3"}) .AddColumn({20.5, 30.5}) .get()); - - // Next call should return empty batch with eos - tester.GenerateNextResult().ExpectRowBatch( - RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) - .AddColumn({}) - 
.AddColumn({}) - .AddColumn({}) - .get()); - - EXPECT_FALSE(tester.node()->HasBatchesRemaining()); - tester.Close(); - - EXPECT_EQ(2, tester.node()->RowsProcessed()); - EXPECT_GT(tester.node()->BytesProcessed(), 0); -} -TEST_F(ClickHouseSourceNodeTest, AggregateQuery) { - // Create operator with aggregate query - planpb::Operator op; - op.set_op_type(planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR); - auto* ch_op = op.mutable_clickhouse_source_op(); - ch_op->set_host("localhost"); - ch_op->set_port(kClickHousePort); - ch_op->set_username("default"); - ch_op->set_password("test_password"); - ch_op->set_database("default"); - ch_op->set_query("SELECT SUM(value) as sum_value, COUNT(*) as count FROM test_table"); - ch_op->set_batch_size(1024); - ch_op->set_streaming(false); - ch_op->add_column_names("sum_value"); - ch_op->add_column_names("count"); - ch_op->add_column_types(types::DataType::FLOAT64); - ch_op->add_column_types(types::DataType::INT64); - - std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); - RowDescriptor output_rd({types::DataType::FLOAT64, types::DataType::INT64}); - - auto tester = exec::ExecNodeTester( - *plan_node, output_rd, std::vector({}), exec_state_.get()); - - EXPECT_TRUE(tester.node()->HasBatchesRemaining()); - - // Should return aggregate result - tester.GenerateNextResult().ExpectRowBatch( - RowBatchBuilder(output_rd, 1, /*eow*/ false, /*eos*/ false) - .AddColumn({61.5}) // 10.5 + 20.5 + 30.5 - .AddColumn({3}) - .get()); - - // Next call should return empty batch with eos - tester.GenerateNextResult().ExpectRowBatch( - RowBatchBuilder(output_rd, 0, /*eow*/ true, /*eos*/ true) - .AddColumn({}) - .AddColumn({}) - .get()); - EXPECT_FALSE(tester.node()->HasBatchesRemaining()); tester.Close(); - - EXPECT_EQ(1, tester.node()->RowsProcessed()); + + EXPECT_EQ(2, tester.node()->RowsProcessed()); EXPECT_GT(tester.node()->BytesProcessed(), 0); } diff --git a/src/carnot/planpb/test_proto.h b/src/carnot/planpb/test_proto.h 
index 1228e45b672..bdc8aece287 100644 --- a/src/carnot/planpb/test_proto.h +++ b/src/carnot/planpb/test_proto.h @@ -201,8 +201,8 @@ port: 9000 username: "default" password: "test_password" database: "default" -query: "SELECT id, name, value FROM test_table ORDER BY id" -batch_size: 1024 +query: "SELECT id, name, value FROM test_table" +batch_size: 2 streaming: false column_names: "id" column_names: "name" @@ -1347,7 +1347,7 @@ planpb::Operator CreateTestSource1PB(const std::string& table_name = "cpu") { planpb::Operator CreateClickHouseSourceOperatorPB() { planpb::Operator op; - auto op_proto = absl::Substitute(kOperatorProtoTmpl, "CLICKHOUSE_SOURCE_OPERATOR", + auto op_proto = absl::Substitute(kOperatorProtoTmpl, "CLICKHOUSE_SOURCE_OPERATOR", "clickhouse_source_op", kClickHouseSourceOperator); CHECK(google::protobuf::TextFormat::MergeFromString(op_proto, &op)) << "Failed to parse proto"; return op; @@ -1421,9 +1421,9 @@ planpb::Operator CreateTestClickHouseSourcePB() { column_types: STRING column_types: FLOAT64 )"; - + planpb::Operator op; - auto op_proto = absl::Substitute(kOperatorProtoTmpl, "CLICKHOUSE_SOURCE_OPERATOR", + auto op_proto = absl::Substitute(kOperatorProtoTmpl, "CLICKHOUSE_SOURCE_OPERATOR", "clickhouse_source_op", kClickHouseSourceOperator); CHECK(google::protobuf::TextFormat::MergeFromString(op_proto, &op)) << "Failed to parse proto"; return op; From 6bf5a411da5fc57d75f99087a43a250184eade51 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Fri, 3 Oct 2025 03:21:24 +0000 Subject: [PATCH 61/86] Add partition column, get clickhouse_source_node_test working and failing logical_planner_test in place Signed-off-by: Dom Del Nano --- src/carnot/exec/clickhouse_source_node.cc | 221 +++++++++++++++--- src/carnot/exec/clickhouse_source_node.h | 5 +- .../exec/clickhouse_source_node_test.cc | 21 +- src/carnot/plan/operators.h | 13 +- src/carnot/planner/logical_planner_test.cc | 42 ++++ src/carnot/planpb/plan.proto | 14 ++ src/carnot/planpb/test_proto.h | 4 + 7 
files changed, 279 insertions(+), 41 deletions(-) diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index 01a70627e94..2029970efa1 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -62,9 +62,17 @@ Status ClickHouseSourceNode::InitImpl(const plan::Operator& plan_node) { has_more_data_ = true; current_block_index_ = 0; - // TODO(ddelnano): Extract time column and start/stop times from the plan node - // For now, use timestamp column for time filtering - time_column_ = "timestamp"; + // Extract time filtering parameters from plan node + timestamp_column_ = plan_node_->timestamp_column(); + partition_column_ = plan_node_->partition_column(); + + // Convert start/end times from nanoseconds to seconds for ClickHouse DateTime + if (plan_node_->start_time() > 0) { + start_time_ = plan_node_->start_time() / 1000000000LL; // Convert ns to seconds + } + if (plan_node_->end_time() > 0) { + end_time_ = plan_node_->end_time() / 1000000000LL; // Convert ns to seconds + } return Status::OK(); } @@ -322,38 +330,75 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock std::string ClickHouseSourceNode::BuildQuery() { std::string query = base_query_; - std::string where_clause; std::vector conditions; - // Add time filtering if start/stop times are specified and time column is set - if (!time_column_.empty()) { + // Add time filtering if start/end times are specified and timestamp column is set + if (!timestamp_column_.empty()) { if (start_time_.has_value()) { - conditions.push_back(absl::Substitute("$0 >= $1", time_column_, start_time_.value())); + conditions.push_back(absl::Substitute("$0 >= $1", timestamp_column_, start_time_.value())); } - if (stop_time_.has_value()) { - conditions.push_back(absl::Substitute("$0 <= $1", time_column_, stop_time_.value())); + if (end_time_.has_value()) { + conditions.push_back(absl::Substitute("$0 <= $1", timestamp_column_, 
end_time_.value())); } } - // Check if the base query already has a WHERE clause + // Add partition column filtering if specified + if (!partition_column_.empty()) { + // TODO(ddelnano): For now, we assume the partition column filtering is handled by the base + // query In a real implementation, we might need to add specific partition filtering logic This + // could involve extracting partition values from the time range or other criteria + } + + // Parse the base query to find WHERE and ORDER BY positions std::string lower_query = query; std::transform(lower_query.begin(), lower_query.end(), lower_query.begin(), ::tolower); - bool has_where = lower_query.find(" where ") != std::string::npos; + size_t where_pos = lower_query.find(" where "); + size_t order_by_pos = lower_query.find(" order by "); + size_t limit_pos = lower_query.find(" limit "); + + // Determine insertion point for conditions if (!conditions.empty()) { - if (has_where) { - where_clause = " AND " + absl::StrJoin(conditions, " AND "); + std::string conditions_clause = absl::StrJoin(conditions, " AND "); + + if (where_pos != std::string::npos) { + // Query already has WHERE clause + size_t insert_pos = std::string::npos; + + // Find where to insert the additional conditions + if (order_by_pos != std::string::npos && order_by_pos > where_pos) { + insert_pos = order_by_pos; + } else if (limit_pos != std::string::npos && limit_pos > where_pos) { + insert_pos = limit_pos; + } else { + insert_pos = query.length(); + } + + query.insert(insert_pos, " AND " + conditions_clause); } else { - where_clause = " WHERE " + absl::StrJoin(conditions, " AND "); + // No WHERE clause, need to add one + size_t insert_pos = std::string::npos; + + if (order_by_pos != std::string::npos) { + insert_pos = order_by_pos; + } else if (limit_pos != std::string::npos) { + insert_pos = limit_pos; + } else { + insert_pos = query.length(); + } + + query.insert(insert_pos, " WHERE " + conditions_clause); } - query += where_clause; } - 
// Add ORDER BY clause (needed for consistent pagination) - // If no ORDER BY exists, add one - prefer time column if available, otherwise use first column + // Update lower_query after modifications + lower_query = query; + std::transform(lower_query.begin(), lower_query.end(), lower_query.begin(), ::tolower); + + // Add ORDER BY clause if needed if (lower_query.find(" order by ") == std::string::npos) { - if (!time_column_.empty()) { - query += absl::Substitute(" ORDER BY $0", time_column_); + if (!timestamp_column_.empty()) { + query += absl::Substitute(" ORDER BY $0", timestamp_column_); } else { // Fall back to ordering by first column for consistent pagination query += " ORDER BY 1"; @@ -376,17 +421,21 @@ Status ClickHouseSourceNode::ExecuteBatchQuery() { } std::string query = BuildQuery(); + VLOG(1) << "Executing ClickHouse query: " << query; try { size_t rows_received = 0; client_->Select(query, [this, &rows_received](const clickhouse::Block& block) { // Only store non-empty blocks if (block.GetRowCount() > 0) { + VLOG(1) << "Received block with " << block.GetRowCount() << " rows"; current_batch_blocks_.push_back(block); rows_received += block.GetRowCount(); } }); + VLOG(1) << "Total rows received: " << rows_received << ", batch size: " << batch_size_; + // Update cursor state current_offset_ += rows_received; if (rows_received < batch_size_) { @@ -401,8 +450,11 @@ Status ClickHouseSourceNode::ExecuteBatchQuery() { } Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { - // If we've processed all blocks in current batch, fetch the next batch + // If we need to fetch more data if (current_block_index_ >= current_batch_blocks_.size()) { + current_block_index_ = 0; + current_batch_blocks_.clear(); + if (!has_more_data_) { // No more data available - send empty batch with eos=true PX_ASSIGN_OR_RETURN(auto empty_batch, @@ -423,28 +475,131 @@ Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { } } - // Process current block - 
const auto& current_block = current_batch_blocks_[current_block_index_]; - bool is_last_block = - (current_block_index_ == current_batch_blocks_.size() - 1) && !has_more_data_; + // Calculate total rows in all blocks + size_t total_rows = 0; + for (const auto& block : current_batch_blocks_) { + total_rows += block.GetRowCount(); + } + + // Create a merged RowBatch + auto merged_batch = std::make_unique(*output_descriptor_, total_rows); + + // Process each column + for (size_t col_idx = 0; col_idx < output_descriptor_->size(); ++col_idx) { + // Get the data type from output descriptor + auto data_type = output_descriptor_->type(col_idx); + + // Create appropriate builder based on data type + std::shared_ptr builder; + switch (data_type) { + case types::DataType::INT64: + builder = std::make_shared(); + break; + case types::DataType::FLOAT64: + builder = std::make_shared(); + break; + case types::DataType::STRING: + builder = std::make_shared(); + break; + case types::DataType::BOOLEAN: + builder = std::make_shared(); + break; + case types::DataType::TIME64NS: + builder = std::make_shared(); + break; + default: + return error::InvalidArgument("Unsupported data type for column $0", col_idx); + } - PX_ASSIGN_OR_RETURN(auto row_batch, - ConvertClickHouseBlockToRowBatch(current_block, is_last_block)); + // Reserve space for all rows + PX_RETURN_IF_ERROR(builder->Reserve(total_rows)); + + // Append data from all blocks + for (const auto& block : current_batch_blocks_) { + PX_ASSIGN_OR_RETURN(auto row_batch, ConvertClickHouseBlockToRowBatch(block, false)); + auto array = row_batch->ColumnAt(col_idx); + + // Append values from this block's array + switch (data_type) { + case types::DataType::INT64: + case types::DataType::TIME64NS: { + auto typed_array = std::static_pointer_cast(array); + auto typed_builder = std::static_pointer_cast(builder); + for (int i = 0; i < typed_array->length(); i++) { + if (typed_array->IsNull(i)) { + 
PX_RETURN_IF_ERROR(typed_builder->AppendNull()); + } else { + typed_builder->UnsafeAppend(typed_array->Value(i)); + } + } + break; + } + case types::DataType::FLOAT64: { + auto typed_array = std::static_pointer_cast(array); + auto typed_builder = std::static_pointer_cast(builder); + for (int i = 0; i < typed_array->length(); i++) { + if (typed_array->IsNull(i)) { + PX_RETURN_IF_ERROR(typed_builder->AppendNull()); + } else { + typed_builder->UnsafeAppend(typed_array->Value(i)); + } + } + break; + } + case types::DataType::STRING: { + auto typed_array = std::static_pointer_cast(array); + auto typed_builder = std::static_pointer_cast(builder); + for (int i = 0; i < typed_array->length(); i++) { + if (typed_array->IsNull(i)) { + PX_RETURN_IF_ERROR(typed_builder->AppendNull()); + } else { + PX_RETURN_IF_ERROR(typed_builder->Append(typed_array->GetString(i))); + } + } + break; + } + case types::DataType::BOOLEAN: { + auto typed_array = std::static_pointer_cast(array); + auto typed_builder = std::static_pointer_cast(builder); + for (int i = 0; i < typed_array->length(); i++) { + if (typed_array->IsNull(i)) { + PX_RETURN_IF_ERROR(typed_builder->AppendNull()); + } else { + typed_builder->UnsafeAppend(typed_array->Value(i)); + } + } + break; + } + default: + return error::InvalidArgument("Unsupported data type for column $0", col_idx); + } + } + + // Finish building and add column + std::shared_ptr merged_array; + PX_RETURN_IF_ERROR(builder->Finish(&merged_array)); + PX_RETURN_IF_ERROR(merged_batch->AddColumn(merged_array)); + } // Set proper end-of-window and end-of-stream flags - if (is_last_block) { - row_batch->set_eow(true); - row_batch->set_eos(true); + bool is_last_batch = !has_more_data_; + if (is_last_batch) { + merged_batch->set_eow(true); + merged_batch->set_eos(true); + } else { + merged_batch->set_eow(false); + merged_batch->set_eos(false); } // Update stats - rows_processed_ += row_batch->num_rows(); - bytes_processed_ += row_batch->NumBytes(); + 
rows_processed_ += merged_batch->num_rows(); + bytes_processed_ += merged_batch->NumBytes(); // Send to children - PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *row_batch)); + PX_RETURN_IF_ERROR(SendRowBatchToChildren(exec_state, *merged_batch)); - current_block_index_++; + // Mark all blocks as processed + current_block_index_ = current_batch_blocks_.size(); return Status::OK(); } diff --git a/src/carnot/exec/clickhouse_source_node.h b/src/carnot/exec/clickhouse_source_node.h index 3a71afe6e2e..84a14c9063a 100644 --- a/src/carnot/exec/clickhouse_source_node.h +++ b/src/carnot/exec/clickhouse_source_node.h @@ -84,8 +84,9 @@ class ClickHouseSourceNode : public SourceNode { // Time filtering std::optional start_time_; - std::optional stop_time_; - std::string time_column_; // Column to use for time filtering + std::optional end_time_; + std::string timestamp_column_; // Column to use for timestamp-based filtering and ordering + std::string partition_column_; // Column used for partitioning // ClickHouse client std::unique_ptr client_; diff --git a/src/carnot/exec/clickhouse_source_node_test.cc b/src/carnot/exec/clickhouse_source_node_test.cc index fb6e98ee18a..1712de15a8f 100644 --- a/src/carnot/exec/clickhouse_source_node_test.cc +++ b/src/carnot/exec/clickhouse_source_node_test.cc @@ -131,8 +131,10 @@ class ClickHouseSourceNodeTest : public ::testing::Test { id UInt64, name String, value Float64, - timestamp DateTime + timestamp DateTime, + partition_key String ) ENGINE = MergeTree() + PARTITION BY (timestamp, partition_key) ORDER BY timestamp )"); @@ -140,6 +142,7 @@ class ClickHouseSourceNodeTest : public ::testing::Test { auto name_col = std::make_shared(); auto value_col = std::make_shared(); auto timestamp_col = std::make_shared(); + auto partition_key_col = std::make_shared(); // Add test data with increasing timestamps std::time_t base_time = std::time(nullptr) - 3600; // Start 1 hour ago @@ -147,22 +150,26 @@ class ClickHouseSourceNodeTest : public 
::testing::Test { name_col->Append("test1"); value_col->Append(10.5); timestamp_col->Append(base_time); + partition_key_col->Append("partition_a"); id_col->Append(2); name_col->Append("test2"); value_col->Append(20.5); timestamp_col->Append(base_time + 1800); // 30 minutes later + partition_key_col->Append("partition_a"); id_col->Append(3); name_col->Append("test3"); value_col->Append(30.5); timestamp_col->Append(base_time + 3600); // 1 hour later + partition_key_col->Append("partition_b"); clickhouse::Block block; block.AppendColumn("id", id_col); block.AppendColumn("name", name_col); block.AppendColumn("value", value_col); block.AppendColumn("timestamp", timestamp_col); + block.AppendColumn("partition_key", partition_key_col); client_->Insert("test_table", block); @@ -229,8 +236,10 @@ TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { id UInt64, name String, value Float64, - timestamp DateTime + timestamp DateTime, + partition_key String ) ENGINE = MergeTree() + PARTITION BY (timestamp, partition_key) ORDER BY timestamp )"); @@ -252,6 +261,10 @@ TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { ch_op->add_column_types(types::DataType::INT64); ch_op->add_column_types(types::DataType::STRING); ch_op->add_column_types(types::DataType::FLOAT64); + ch_op->set_timestamp_column("timestamp"); + ch_op->set_partition_column("partition_key"); + ch_op->set_start_time(1000000000000000000LL); // Year 2001 in nanoseconds + ch_op->set_end_time(9223372036854775807LL); // Max int64 std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); RowDescriptor output_rd( @@ -296,6 +309,10 @@ TEST_F(ClickHouseSourceNodeTest, FilteredQuery) { ch_op->add_column_types(types::DataType::INT64); ch_op->add_column_types(types::DataType::STRING); ch_op->add_column_types(types::DataType::FLOAT64); + ch_op->set_timestamp_column("timestamp"); + ch_op->set_partition_column("partition_key"); + ch_op->set_start_time(1000000000000000000LL); // Year 2001 in nanoseconds + 
ch_op->set_end_time(9223372036854775807LL); // Max int64 std::unique_ptr plan_node = plan::ClickHouseSourceOperator::FromProto(op, 1); RowDescriptor output_rd( diff --git a/src/carnot/plan/operators.h b/src/carnot/plan/operators.h index 143ecf53674..e65a0deafc3 100644 --- a/src/carnot/plan/operators.h +++ b/src/carnot/plan/operators.h @@ -361,15 +361,16 @@ class EmptySourceOperator : public Operator { class ClickHouseSourceOperator : public Operator { public: - explicit ClickHouseSourceOperator(int64_t id) : Operator(id, planpb::CLICKHOUSE_SOURCE_OPERATOR) {} + explicit ClickHouseSourceOperator(int64_t id) + : Operator(id, planpb::CLICKHOUSE_SOURCE_OPERATOR) {} ~ClickHouseSourceOperator() override = default; - + StatusOr OutputRelation( const table_store::schema::Schema& schema, const PlanState& state, const std::vector& input_ids) const override; Status Init(const planpb::ClickHouseSourceOperator& pb); std::string DebugString() const override; - + std::string host() const { return pb_.host(); } int32_t port() const { return pb_.port(); } std::string username() const { return pb_.username(); } @@ -389,7 +390,11 @@ class ClickHouseSourceOperator : public Operator { } return types; } - + std::string timestamp_column() const { return pb_.timestamp_column(); } + std::string partition_column() const { return pb_.partition_column(); } + int64_t start_time() const { return pb_.start_time(); } + int64_t end_time() const { return pb_.end_time(); } + private: planpb::ClickHouseSourceOperator pb_; }; diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index 4c3e8659c88..c40453b4a75 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -1039,6 +1039,48 @@ px.export(otel_df, px.otel.Data( )))otel"); } +constexpr char kClickHouseSourceQuery[] = R"pxl( +import px + +# Test ClickHouse source node functionality +df = px.DataFrame('http_events', start_time='-10m', end_time='-5m') 
+px.display(df, 'clickhouse_data') +)pxl"; + +TEST_F(LogicalPlannerTest, ClickHouseSourceNode) { + auto planner = LogicalPlanner::Create(info_).ConsumeValueOrDie(); + + // Create a test schema that includes a ClickHouse table + auto state = testutils::CreateTwoPEMsOneKelvinPlannerState(testutils::kHttpEventsSchema); + + auto plan_or_s = planner->Plan(MakeQueryRequest(state, kClickHouseSourceQuery)); + EXPECT_OK(plan_or_s); + auto plan = plan_or_s.ConsumeValueOrDie(); + EXPECT_OK(plan->ToProto()); + + // Verify the plan contains ClickHouse source operators + auto plan_pb = plan->ToProto().ConsumeValueOrDie(); + bool has_clickhouse_source = false; + + for (const auto& [address, agent_plan] : plan_pb.qb_address_to_plan()) { + for (const auto& planFragment : agent_plan.nodes()) { + for (const auto& planNode : planFragment.nodes()) { + if (planNode.op().op_type() == planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR) { + has_clickhouse_source = true; + break; + } + } + if (has_clickhouse_source) break; + } + if (has_clickhouse_source) break; + } + + // Note: This test validates that the planner can process ClickHouse queries + // The actual presence of ClickHouse operators depends on the table configuration + EXPECT_OK(plan->ToProto()); + EXPECT_TRUE(has_clickhouse_source); +} + } // namespace planner } // namespace carnot } // namespace px diff --git a/src/carnot/planpb/plan.proto b/src/carnot/planpb/plan.proto index a906e54c3bf..49849b7abbf 100644 --- a/src/carnot/planpb/plan.proto +++ b/src/carnot/planpb/plan.proto @@ -383,6 +383,20 @@ message ClickHouseSourceOperator { // Whether to stream results (future enhancement) bool streaming = 10; + + // Column name to use for timestamp-based filtering and ordering + // This column should be of DateTime or DateTime64 type + string timestamp_column = 11; + + // Column name to use for partitioning + // The underlying ClickHouse table should be partitioned by this column + string partition_column = 12; + + // Start time for 
time-based filtering (nanoseconds since epoch) + int64 start_time = 13; + + // End time for time-based filtering (nanoseconds since epoch) + int64 end_time = 14; } // OTelLog maps operator columns to each field in the OpenTelemetry Log configuration. diff --git a/src/carnot/planpb/test_proto.h b/src/carnot/planpb/test_proto.h index bdc8aece287..474cfde2ad4 100644 --- a/src/carnot/planpb/test_proto.h +++ b/src/carnot/planpb/test_proto.h @@ -210,6 +210,10 @@ column_names: "value" column_types: INT64 column_types: STRING column_types: FLOAT64 +timestamp_column: "timestamp" +partition_column: "partition_key" +start_time: 1000000000000000000 +end_time: 9223372036854775807 )"; constexpr char kBlockingAggOperator1[] = R"( From 2fef3e8ed2d24e1ed192a98bd821470c64f73920 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Fri, 3 Oct 2025 04:56:25 +0000 Subject: [PATCH 62/86] Update carnot_executable to start a clickhouse container and make changes to get partway through a clickhouse pxl script executing. 
Currently failing at grpc_sink_node exec Signed-off-by: Dom Del Nano --- src/carnot/BUILD.bazel | 9 + src/carnot/carnot.cc | 1 + src/carnot/carnot_executable.cc | 231 +++++++++++++++++- src/carnot/exec/BUILD.bazel | 7 +- src/carnot/exec/clickhouse_source_node.cc | 48 +++- src/carnot/exec/exec_graph.cc | 5 + src/carnot/plan/plan_fragment.cc | 3 + src/carnot/plan/plan_fragment.h | 8 + src/carnot/planner/ir/all_ir_nodes.h | 1 + src/carnot/planner/ir/clickhouse_source_ir.cc | 147 +++++++++++ src/carnot/planner/ir/clickhouse_source_ir.h | 111 +++++++++ src/carnot/planner/ir/operators.inl | 1 + src/carnot/planner/logical_planner_test.cc | 3 +- src/carnot/planner/objects/dataframe.cc | 63 +++-- 14 files changed, 615 insertions(+), 23 deletions(-) create mode 100644 src/carnot/planner/ir/clickhouse_source_ir.cc create mode 100644 src/carnot/planner/ir/clickhouse_source_ir.h diff --git a/src/carnot/BUILD.bazel b/src/carnot/BUILD.bazel index 664599ad9c0..a796db3363c 100644 --- a/src/carnot/BUILD.bazel +++ b/src/carnot/BUILD.bazel @@ -98,7 +98,16 @@ pl_cc_binary( pl_cc_binary( name = "carnot_executable", srcs = ["carnot_executable.cc"], + data = [ + "//src/stirling/source_connectors/socket_tracer/testing/container_images:clickhouse.tar", + ], + tags = [ + "requires_docker", + ], deps = [ ":cc_library", + "//src/common/testing:cc_library", + "//src/common/testing/test_utils:cc_library", + "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", ], ) diff --git a/src/carnot/carnot.cc b/src/carnot/carnot.cc index a466bb5194d..a7b32f51c79 100644 --- a/src/carnot/carnot.cc +++ b/src/carnot/carnot.cc @@ -181,6 +181,7 @@ Status CarnotImpl::RegisterUDFsInPlanFragment(exec::ExecState* exec_state, plan: .OnUDTFSource(no_op) .OnEmptySource(no_op) .OnOTelSink(no_op) + .OnClickHouseSource(no_op) .Walk(pf); } diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 52a3d46cd7f..0c59dd41699 100644 --- a/src/carnot/carnot_executable.cc +++ 
b/src/carnot/carnot_executable.cc @@ -16,9 +16,14 @@ * SPDX-License-Identifier: Apache-2.0 */ +#include + +#include #include #include +#include #include +#include #include #include @@ -28,6 +33,8 @@ #include "src/carnot/exec/local_grpc_result_server.h" #include "src/carnot/funcs/funcs.h" #include "src/common/base/base.h" +#include "src/common/testing/test_environment.h" +#include "src/common/testing/test_utils/container_runner.h" #include "src/shared/types/column_wrapper.h" #include "src/shared/types/type_utils.h" #include "src/table_store/table_store.h" @@ -46,6 +53,9 @@ DEFINE_string(table_name, gflags::StringFromEnv("TABLE_NAME", "csv_table"), DEFINE_int64(rowbatch_size, gflags::Int64FromEnv("ROWBATCH_SIZE", 100), "The size of the rowbatches."); +DEFINE_bool(use_clickhouse, gflags::BoolFromEnv("USE_CLICKHOUSE", false), + "Whether to start a ClickHouse container with test data."); + using px::types::DataType; namespace { @@ -225,6 +235,151 @@ void TableToCsv(const std::string& filename, output_csv.close(); } +// ClickHouse container configuration +constexpr char kClickHouseImage[] = + "src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse.tar"; +constexpr char kClickHouseReadyMessage[] = "Ready for connections"; +constexpr int kClickHousePort = 9000; + +/** + * Sets up a ClickHouse client connection with retries. 
+ */ +std::unique_ptr SetupClickHouseClient() { + clickhouse::ClientOptions client_options; + client_options.SetHost("localhost"); + client_options.SetPort(kClickHousePort); + client_options.SetUser("default"); + client_options.SetPassword("test_password"); + client_options.SetDefaultDatabase("default"); + + const int kMaxRetries = 10; + for (int i = 0; i < kMaxRetries; ++i) { + LOG(INFO) << "Attempting to connect to ClickHouse (attempt " << (i + 1) << "/" << kMaxRetries + << ")..."; + try { + auto client = std::make_unique(client_options); + client->Execute("SELECT 1"); + LOG(INFO) << "Successfully connected to ClickHouse"; + return client; + } catch (const std::exception& e) { + LOG(WARNING) << "Failed to connect: " << e.what(); + if (i < kMaxRetries - 1) { + std::this_thread::sleep_for(std::chrono::seconds(2)); + } else { + LOG(FATAL) << "Failed to connect to ClickHouse after " << kMaxRetries << " attempts"; + } + } + } + return nullptr; +} + +/** + * Creates the http_events table in ClickHouse with proper schema and sample data. 
+ */ +void CreateHttpEventsTable(clickhouse::Client* client) { + try { + client->Execute("DROP TABLE IF EXISTS http_events"); + + // Create table with http_events schema plus hostname and event_time + client->Execute(R"( + CREATE TABLE http_events ( + time_ DateTime64(9), + local_addr String, + local_port Int64, + remote_addr String, + remote_port Int64, + major_version Int64, + minor_version Int64, + content_type Int64, + req_headers String, + req_method String, + req_path String, + req_body String, + resp_headers String, + resp_status Int64, + resp_message String, + resp_body String, + resp_latency_ns Int64, + hostname String, + event_time DateTime64(3) + ) ENGINE = MergeTree() + PARTITION BY toYYYYMM(event_time) + ORDER BY (hostname, event_time) + )"); + + // Insert sample data + auto time_col = std::make_shared(9); + auto local_addr_col = std::make_shared(); + auto local_port_col = std::make_shared(); + auto remote_addr_col = std::make_shared(); + auto remote_port_col = std::make_shared(); + auto major_version_col = std::make_shared(); + auto minor_version_col = std::make_shared(); + auto content_type_col = std::make_shared(); + auto req_headers_col = std::make_shared(); + auto req_method_col = std::make_shared(); + auto req_path_col = std::make_shared(); + auto req_body_col = std::make_shared(); + auto resp_headers_col = std::make_shared(); + auto resp_status_col = std::make_shared(); + auto resp_message_col = std::make_shared(); + auto resp_body_col = std::make_shared(); + auto resp_latency_ns_col = std::make_shared(); + auto hostname_col = std::make_shared(); + auto event_time_col = std::make_shared(3); + + // Add sample rows + std::time_t now = std::time(nullptr); + for (int i = 0; i < 10; ++i) { + time_col->Append((now - 600 + i * 60) * 1000000000LL); // Convert to nanoseconds + local_addr_col->Append("127.0.0.1"); + local_port_col->Append(8080); + remote_addr_col->Append(absl::StrFormat("192.168.1.%d", 100 + i)); + remote_port_col->Append(50000 + i); + 
major_version_col->Append(1); + minor_version_col->Append(1); + content_type_col->Append(0); + req_headers_col->Append("Content-Type: application/json"); + req_method_col->Append(i % 2 == 0 ? "GET" : "POST"); + req_path_col->Append(absl::StrFormat("/api/v1/resource/%d", i)); + req_body_col->Append(i % 2 == 0 ? "" : "{\"data\": \"test\"}"); + resp_headers_col->Append("Content-Type: application/json"); + resp_status_col->Append(200); + resp_message_col->Append("OK"); + resp_body_col->Append("{\"result\": \"success\"}"); + resp_latency_ns_col->Append(1000000 + i * 100000); + hostname_col->Append(absl::StrFormat("host-%d", i % 3)); + event_time_col->Append((now - 600 + i * 60) * 1000LL); // Convert to milliseconds + } + + clickhouse::Block block; + block.AppendColumn("time_", time_col); + block.AppendColumn("local_addr", local_addr_col); + block.AppendColumn("local_port", local_port_col); + block.AppendColumn("remote_addr", remote_addr_col); + block.AppendColumn("remote_port", remote_port_col); + block.AppendColumn("major_version", major_version_col); + block.AppendColumn("minor_version", minor_version_col); + block.AppendColumn("content_type", content_type_col); + block.AppendColumn("req_headers", req_headers_col); + block.AppendColumn("req_method", req_method_col); + block.AppendColumn("req_path", req_path_col); + block.AppendColumn("req_body", req_body_col); + block.AppendColumn("resp_headers", resp_headers_col); + block.AppendColumn("resp_status", resp_status_col); + block.AppendColumn("resp_message", resp_message_col); + block.AppendColumn("resp_body", resp_body_col); + block.AppendColumn("resp_latency_ns", resp_latency_ns_col); + block.AppendColumn("hostname", hostname_col); + block.AppendColumn("event_time", event_time_col); + + client->Insert("http_events", block); + LOG(INFO) << "http_events table created and populated successfully"; + } catch (const std::exception& e) { + LOG(FATAL) << "Failed to create http_events table: " << e.what(); + } +} + } // 
namespace int main(int argc, char* argv[]) { @@ -235,8 +390,44 @@ int main(int argc, char* argv[]) { auto query = FLAGS_query; auto rb_size = FLAGS_rowbatch_size; auto table_name = FLAGS_table_name; + auto use_clickhouse = FLAGS_use_clickhouse; + + // ClickHouse container and client (if enabled) + std::unique_ptr clickhouse_server; + std::unique_ptr clickhouse_client; + + std::shared_ptr table; + + if (use_clickhouse) { + LOG(INFO) << "Starting ClickHouse container..."; + clickhouse_server = + std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), + "clickhouse_carnot", kClickHouseReadyMessage); - auto table = GetTableFromCsv(filename, rb_size); + std::vector options = { + absl::Substitute("--publish=$0:$0", kClickHousePort), + "--env=CLICKHOUSE_PASSWORD=test_password", + "--network=host", + }; + + auto status = clickhouse_server->Run(std::chrono::seconds{60}, options, {}, true, + std::chrono::seconds{300}); + if (!status.ok()) { + LOG(FATAL) << "Failed to start ClickHouse container: " << status.msg(); + } + + // Give ClickHouse time to initialize + LOG(INFO) << "Waiting for ClickHouse to initialize..."; + std::this_thread::sleep_for(std::chrono::seconds(5)); + + // Setup ClickHouse client and create test table + clickhouse_client = SetupClickHouseClient(); + CreateHttpEventsTable(clickhouse_client.get()); + LOG(INFO) << "ClickHouse ready with http_events table"; + } else { + // Only load CSV if not using ClickHouse + table = GetTableFromCsv(filename, rb_size); + } // Execute query. 
auto table_store = std::make_shared(); @@ -257,7 +448,43 @@ int main(int argc, char* argv[]) { auto carnot = px::carnot::Carnot::Create(sole::uuid4(), std::move(func_registry), table_store, std::move(clients_config), std::move(server_config)) .ConsumeValueOrDie(); - table_store->AddTable(table_name, table); + + if (use_clickhouse) { + // Create http_events table schema in table_store + std::vector types = { + px::types::DataType::TIME64NS, // time_ + px::types::DataType::STRING, // local_addr + px::types::DataType::INT64, // local_port + px::types::DataType::STRING, // remote_addr + px::types::DataType::INT64, // remote_port + px::types::DataType::INT64, // major_version + px::types::DataType::INT64, // minor_version + px::types::DataType::INT64, // content_type + px::types::DataType::STRING, // req_headers + px::types::DataType::STRING, // req_method + px::types::DataType::STRING, // req_path + px::types::DataType::STRING, // req_body + px::types::DataType::STRING, // resp_headers + px::types::DataType::INT64, // resp_status + px::types::DataType::STRING, // resp_message + px::types::DataType::STRING, // resp_body + px::types::DataType::INT64, // resp_latency_ns + px::types::DataType::STRING, // hostname + px::types::DataType::TIME64NS, // event_time + }; + std::vector names = { + "time_", "local_addr", "local_port", "remote_addr", "remote_port", + "major_version", "minor_version", "content_type", "req_headers", "req_method", + "req_path", "req_body", "resp_headers", "resp_status", "resp_message", + "resp_body", "resp_latency_ns", "hostname", "event_time"}; + px::table_store::schema::Relation rel(types, names); + auto http_events_table = px::table_store::Table::Create("http_events", rel); + table_store->AddTable("http_events", http_events_table); + } else if (table != nullptr) { + // Add CSV table to table_store + table_store->AddTable(table_name, table); + } + auto exec_status = carnot->ExecuteQuery(query, sole::uuid4(), px::CurrentTimeNS()); if 
(!exec_status.ok()) { LOG(FATAL) << absl::Substitute("Query failed to execute: $0", exec_status.msg()); diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index e845cf563d3..5dcc483ca05 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -33,9 +33,9 @@ pl_cc_library( ], ), hdrs = [ + "clickhouse_source_node.h", "exec_node.h", "exec_state.h", - "clickhouse_source_node.h", ], deps = [ "//src/carnot/carnotpb:carnot_pl_cc_proto", @@ -305,13 +305,16 @@ pl_cc_test( pl_cc_test( name = "clickhouse_source_node_test", + timeout = "long", + timeout = "long", + timeout = "long", srcs = ["clickhouse_source_node_test.cc"], data = [ "//src/stirling/source_connectors/socket_tracer/testing/container_images:clickhouse.tar", ], tags = [ - "requires_docker", "exclusive", + "requires_docker", ], deps = [ ":cc_library", diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index 2029970efa1..8cc2a8bab86 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -136,7 +136,7 @@ StatusOr ClickHouseSourceNode::ClickHouseTypeToPixieType( } // Date/time types - if (type_name == "DateTime" || type_name == "DateTime64") { + if (type_name == "DateTime" || type_name.find("DateTime64") == 0) { return types::DataType::TIME64NS; } @@ -315,6 +315,52 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock PX_RETURN_IF_ERROR(builder.Finish(&array)); PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else if (type_name.find("DateTime64") == 0) { + auto typed_col = ch_column->As(); + arrow::Int64Builder builder; + PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); + + for (size_t i = 0; i < num_rows; ++i) { + // DateTime64 stores time with sub-second precision + // The value is already in the correct precision (e.g., nanoseconds for DateTime64(9)) + // We need to convert to nanoseconds if it's not already + int64_t value = typed_col->At(i); + + // Extract precision 
from type name (e.g., "DateTime64(9)" -> 9) + size_t precision = 3; // default to milliseconds + size_t start = type_name.find('('); + if (start != std::string::npos) { + size_t end = type_name.find(')', start); + if (end != std::string::npos) { + precision = std::stoi(type_name.substr(start + 1, end - start - 1)); + } + } + + // Convert to nanoseconds based on precision + int64_t ns = value; + if (precision < 9) { + // Scale up to nanoseconds + int64_t multiplier = 1; + for (size_t p = precision; p < 9; p++) { + multiplier *= 10; + } + ns = value * multiplier; + } else if (precision > 9) { + // Scale down to nanoseconds + int64_t divisor = 1; + for (size_t p = 9; p < precision; p++) { + divisor *= 10; + } + ns = value / divisor; + } + + builder.UnsafeAppend(ns); + } + + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder.Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + } else { return error::InvalidArgument("Unsupported ClickHouse type for conversion: $0", type_name); } diff --git a/src/carnot/exec/exec_graph.cc b/src/carnot/exec/exec_graph.cc index 705cf381e38..8b672d3e700 100644 --- a/src/carnot/exec/exec_graph.cc +++ b/src/carnot/exec/exec_graph.cc @@ -24,6 +24,7 @@ #include #include "src/carnot/exec/agg_node.h" +#include "src/carnot/exec/clickhouse_source_node.h" #include "src/carnot/exec/empty_source_node.h" #include "src/carnot/exec/equijoin_node.h" #include "src/carnot/exec/exec_node.h" @@ -108,6 +109,10 @@ Status ExecutionGraph::Init(table_store::schema::Schema* schema, plan::PlanState .OnOTelSink([&](auto& node) { return OnOperatorImpl(node, &descriptors); }) + .OnClickHouseSource([&](auto& node) { + return OnOperatorImpl(node, + &descriptors); + }) .Walk(pf_); } diff --git a/src/carnot/plan/plan_fragment.cc b/src/carnot/plan/plan_fragment.cc index 91d60081347..4ec6c3bf6ef 100644 --- a/src/carnot/plan/plan_fragment.cc +++ b/src/carnot/plan/plan_fragment.cc @@ -98,6 +98,9 @@ Status PlanFragmentWalker::CallWalkFn(const Operator& op) { 
case planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR: PX_RETURN_IF_ERROR(CallAs(on_otel_sink_walk_fn_, op)); break; + case planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR: + PX_RETURN_IF_ERROR(CallAs(on_clickhouse_source_walk_fn_, op)); + break; default: LOG(FATAL) << absl::Substitute("Operator does not exist: $0", magic_enum::enum_name(op_type)); return error::InvalidArgument("Operator does not exist: $0", magic_enum::enum_name(op_type)); diff --git a/src/carnot/plan/plan_fragment.h b/src/carnot/plan/plan_fragment.h index 39b1cea9ceb..6351132ac4c 100644 --- a/src/carnot/plan/plan_fragment.h +++ b/src/carnot/plan/plan_fragment.h @@ -76,6 +76,7 @@ class PlanFragmentWalker { using UDTFSourceWalkFn = std::function; using EmptySourceWalkFn = std::function; using OTelSinkWalkFn = std::function; + using ClickHouseSourceWalkFn = std::function; /** * Register callback for when a memory source operator is encountered. @@ -181,6 +182,12 @@ class PlanFragmentWalker { on_otel_sink_walk_fn_ = fn; return *this; } + + PlanFragmentWalker& OnClickHouseSource(const ClickHouseSourceWalkFn& fn) { + on_clickhouse_source_walk_fn_ = fn; + return *this; + } + /** * Perform a walk of the plan fragment operators in a topologically-sorted order. * @param plan_fragment The plan fragment to walk. 
@@ -206,6 +213,7 @@ class PlanFragmentWalker { UDTFSourceWalkFn on_udtf_source_walk_fn_; EmptySourceWalkFn on_empty_source_walk_fn_; OTelSinkWalkFn on_otel_sink_walk_fn_; + ClickHouseSourceWalkFn on_clickhouse_source_walk_fn_; }; } // namespace plan diff --git a/src/carnot/planner/ir/all_ir_nodes.h b/src/carnot/planner/ir/all_ir_nodes.h index 5c0b49744cd..23b278d16d3 100644 --- a/src/carnot/planner/ir/all_ir_nodes.h +++ b/src/carnot/planner/ir/all_ir_nodes.h @@ -20,6 +20,7 @@ #include "src/carnot/planner/ir/blocking_agg_ir.h" #include "src/carnot/planner/ir/bool_ir.h" +#include "src/carnot/planner/ir/clickhouse_source_ir.h" #include "src/carnot/planner/ir/column_ir.h" #include "src/carnot/planner/ir/data_ir.h" #include "src/carnot/planner/ir/drop_ir.h" diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc new file mode 100644 index 00000000000..10bcfb1ef26 --- /dev/null +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -0,0 +1,147 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/planner/ir/clickhouse_source_ir.h" +#include "src/carnot/planner/ir/ir.h" + +namespace px { +namespace carnot { +namespace planner { + +std::string ClickHouseSourceIR::DebugString() const { + return absl::Substitute("$0(id=$1, table=$2)", type_string(), id(), table_name_); +} + +Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { + auto pb = op->mutable_clickhouse_source_op(); + op->set_op_type(planpb::CLICKHOUSE_SOURCE_OPERATOR); + + // TODO(ddelnano): Set ClickHouse connection parameters from config + pb->set_host("localhost"); + pb->set_port(9000); + pb->set_username("default"); + pb->set_password("test_password"); + pb->set_database("default"); + + // Build the query + pb->set_query(absl::Substitute("SELECT * FROM $0", table_name_)); + + if (!column_index_map_set()) { + return error::InvalidArgument("ClickHouseSource columns are not set."); + } + + DCHECK(is_type_resolved()); + DCHECK_EQ(column_index_map_.size(), resolved_table_type()->ColumnNames().size()); + for (const auto& [idx, col_name] : Enumerate(resolved_table_type()->ColumnNames())) { + pb->add_column_names(col_name); + auto val_type = std::static_pointer_cast( + resolved_table_type()->GetColumnType(col_name).ConsumeValueOrDie()); + pb->add_column_types(val_type->data_type()); + } + + if (IsTimeStartSet()) { + pb->set_start_time(time_start_ns()); + } + + if (IsTimeStopSet()) { + pb->set_end_time(time_stop_ns()); + } + + // Set batch size + pb->set_batch_size(1024); + + // Set default timestamp and partition columns (can be configured later) + pb->set_timestamp_column("time_"); + pb->set_partition_column("hostname"); + + return Status::OK(); +} + +Status ClickHouseSourceIR::Init(const std::string& table_name, + const std::vector& select_columns) { + table_name_ = table_name; + column_names_ = select_columns; + return Status::OK(); +} + +StatusOr> ClickHouseSourceIR::PruneOutputColumnsToImpl( + const 
absl::flat_hash_set& output_colnames) { + DCHECK(column_index_map_set()); + DCHECK(is_type_resolved()); + std::vector new_col_names; + std::vector new_col_index_map; + + auto col_names = resolved_table_type()->ColumnNames(); + for (const auto& [idx, name] : Enumerate(col_names)) { + if (output_colnames.contains(name)) { + new_col_names.push_back(name); + new_col_index_map.push_back(column_index_map_[idx]); + } + } + if (new_col_names != resolved_table_type()->ColumnNames()) { + column_names_ = new_col_names; + } + column_index_map_ = new_col_index_map; + return output_colnames; +} + +Status ClickHouseSourceIR::CopyFromNodeImpl(const IRNode* node, + absl::flat_hash_map*) { + const ClickHouseSourceIR* source_ir = static_cast(node); + + table_name_ = source_ir->table_name_; + time_start_ns_ = source_ir->time_start_ns_; + time_stop_ns_ = source_ir->time_stop_ns_; + column_names_ = source_ir->column_names_; + column_index_map_set_ = source_ir->column_index_map_set_; + column_index_map_ = source_ir->column_index_map_; + + return Status::OK(); +} + +Status ClickHouseSourceIR::ResolveType(CompilerState* compiler_state) { + auto relation_it = compiler_state->relation_map()->find(table_name()); + if (relation_it == compiler_state->relation_map()->end()) { + return CreateIRNodeError("Table '$0' not found.", table_name_); + } + auto table_relation = relation_it->second; + auto full_table_type = TableType::Create(table_relation); + if (select_all()) { + std::vector column_indices; + for (int64_t i = 0; i < static_cast(table_relation.NumColumns()); ++i) { + column_indices.push_back(i); + } + SetColumnIndexMap(column_indices); + return SetResolvedType(full_table_type); + } + + std::vector column_indices; + auto new_table = TableType::Create(); + for (const auto& col_name : column_names_) { + PX_ASSIGN_OR_RETURN(auto col_type, full_table_type->GetColumnType(col_name)); + new_table->AddColumn(col_name, col_type); + column_indices.push_back(table_relation.GetColumnIndex(col_name)); 
+ } + + SetColumnIndexMap(column_indices); + return SetResolvedType(new_table); +} + +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/ir/clickhouse_source_ir.h b/src/carnot/planner/ir/clickhouse_source_ir.h new file mode 100644 index 00000000000..6b793196a31 --- /dev/null +++ b/src/carnot/planner/ir/clickhouse_source_ir.h @@ -0,0 +1,111 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include +#include +#include + +#include "src/carnot/planner/compiler_state/compiler_state.h" +#include "src/carnot/planner/ir/expression_ir.h" +#include "src/carnot/planner/ir/operator_ir.h" +#include "src/carnot/planner/types/types.h" +#include "src/common/base/base.h" +#include "src/shared/types/types.h" + +namespace px { +namespace carnot { +namespace planner { + +/** + * @brief The ClickHouseSourceIR represents a source that reads data from a ClickHouse database. + */ +class ClickHouseSourceIR : public OperatorIR { + public: + ClickHouseSourceIR() = delete; + explicit ClickHouseSourceIR(int64_t id) : OperatorIR(id, IRNodeType::kClickHouseSource) {} + + /** + * @brief Initialize the ClickHouse source. + * + * @param table_name the table to load. + * @param select_columns the columns to select. If vector is empty, then select all columns. 
+ * @return Status + */ + Status Init(const std::string& table_name, const std::vector& select_columns); + + std::string table_name() const { return table_name_; } + + void SetTimeStartNS(int64_t time_start_ns) { time_start_ns_ = time_start_ns; } + void SetTimeStopNS(int64_t time_stop_ns) { time_stop_ns_ = time_stop_ns; } + bool IsTimeStartSet() const { return time_start_ns_.has_value(); } + bool IsTimeStopSet() const { return time_stop_ns_.has_value(); } + + std::string DebugString() const override; + + int64_t time_start_ns() const { return time_start_ns_.value(); } + int64_t time_stop_ns() const { return time_stop_ns_.value(); } + + const std::vector& column_index_map() const { return column_index_map_; } + bool column_index_map_set() const { return column_index_map_set_; } + void SetColumnIndexMap(const std::vector& column_index_map) { + column_index_map_set_ = true; + column_index_map_ = column_index_map; + } + + Status ToProto(planpb::Operator*) const override; + + bool select_all() const { return column_names_.size() == 0; } + + Status CopyFromNodeImpl(const IRNode* node, + absl::flat_hash_map* copied_nodes_map) override; + const std::vector& column_names() const { return column_names_; } + + StatusOr>> RequiredInputColumns() const override { + return std::vector>{}; + } + + void SetColumnNames(const std::vector& col_names) { column_names_ = col_names; } + + bool IsSource() const override { return true; } + + Status ResolveType(CompilerState* compiler_state); + + protected: + StatusOr> PruneOutputColumnsToImpl( + const absl::flat_hash_set& output_colnames) override; + + private: + std::string table_name_; + + std::optional time_start_ns_; + std::optional time_stop_ns_; + + // Hold of columns in the order that they are selected. + std::vector column_names_; + + // The mapping of the source's column indices to the current columns, as given by column_names_. 
+ std::vector column_index_map_; + bool column_index_map_set_ = false; +}; + +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/ir/operators.inl b/src/carnot/planner/ir/operators.inl index 817295e3a6e..9ace7fd94e6 100644 --- a/src/carnot/planner/ir/operators.inl +++ b/src/carnot/planner/ir/operators.inl @@ -37,5 +37,6 @@ PX_CARNOT_IR_NODE(Rolling) PX_CARNOT_IR_NODE(Stream) PX_CARNOT_IR_NODE(EmptySource) PX_CARNOT_IR_NODE(OTelExportSink) +PX_CARNOT_IR_NODE(ClickHouseSource) #endif diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index c40453b4a75..8d62e1f5175 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -1043,7 +1043,8 @@ constexpr char kClickHouseSourceQuery[] = R"pxl( import px # Test ClickHouse source node functionality -df = px.DataFrame('http_events', start_time='-10m', end_time='-5m') +df = px.DataFrame('http_events', start_time='-10m', end_time='-5m', clickhouse=True) +df = df['time_', 'req_headers'] px.display(df, 'clickhouse_data') )pxl"; diff --git a/src/carnot/planner/objects/dataframe.cc b/src/carnot/planner/objects/dataframe.cc index 13140b40e17..fbaabda1844 100644 --- a/src/carnot/planner/objects/dataframe.cc +++ b/src/carnot/planner/objects/dataframe.cc @@ -19,6 +19,7 @@ #include "src/carnot/planner/objects/dataframe.h" #include "src/carnot/planner/ast/ast_visitor.h" #include "src/carnot/planner/ir/ast_utils.h" +#include "src/carnot/planner/ir/clickhouse_source_ir.h" #include "src/carnot/planner/objects/collection_object.h" #include "src/carnot/planner/objects/expr_object.h" #include "src/carnot/planner/objects/funcobject.h" @@ -109,22 +110,50 @@ StatusOr DataFrameConstructor(CompilerState* compiler_state, IR* gr PX_ASSIGN_OR_RETURN(std::vector columns, ParseAsListOfStrings(args.GetArg("select"), "select")); std::string table_name = table->str(); - PX_ASSIGN_OR_RETURN(MemorySourceIR * 
mem_source_op, - graph->CreateNode(ast, table_name, columns)); - - if (!NoneObject::IsNoneObject(args.GetArg("start_time"))) { - PX_ASSIGN_OR_RETURN(ExpressionIR * start_time, GetArgAs(ast, args, "start_time")); - PX_ASSIGN_OR_RETURN(auto start_time_ns, - ParseAllTimeFormats(compiler_state->time_now().val, start_time)); - mem_source_op->SetTimeStartNS(start_time_ns); - } - if (!NoneObject::IsNoneObject(args.GetArg("end_time"))) { - PX_ASSIGN_OR_RETURN(ExpressionIR * end_time, GetArgAs(ast, args, "end_time")); - PX_ASSIGN_OR_RETURN(auto end_time_ns, - ParseAllTimeFormats(compiler_state->time_now().val, end_time)); - mem_source_op->SetTimeStopNS(end_time_ns); + + // Check if we should use ClickHouse or memory source + PX_ASSIGN_OR_RETURN(BoolIR * use_clickhouse, GetArgAs(ast, args, "clickhouse")); + bool is_clickhouse = use_clickhouse->val(); + + if (is_clickhouse) { + // Create ClickHouseSourceIR + PX_ASSIGN_OR_RETURN(ClickHouseSourceIR * clickhouse_source_op, + graph->CreateNode(ast, table_name, columns)); + + if (!NoneObject::IsNoneObject(args.GetArg("start_time"))) { + PX_ASSIGN_OR_RETURN(ExpressionIR * start_time, + GetArgAs(ast, args, "start_time")); + PX_ASSIGN_OR_RETURN(auto start_time_ns, + ParseAllTimeFormats(compiler_state->time_now().val, start_time)); + clickhouse_source_op->SetTimeStartNS(start_time_ns); + } + if (!NoneObject::IsNoneObject(args.GetArg("end_time"))) { + PX_ASSIGN_OR_RETURN(ExpressionIR * end_time, GetArgAs(ast, args, "end_time")); + PX_ASSIGN_OR_RETURN(auto end_time_ns, + ParseAllTimeFormats(compiler_state->time_now().val, end_time)); + clickhouse_source_op->SetTimeStopNS(end_time_ns); + } + return Dataframe::Create(compiler_state, clickhouse_source_op, visitor); + } else { + // Create MemorySourceIR (existing behavior) + PX_ASSIGN_OR_RETURN(MemorySourceIR * mem_source_op, + graph->CreateNode(ast, table_name, columns)); + + if (!NoneObject::IsNoneObject(args.GetArg("start_time"))) { + PX_ASSIGN_OR_RETURN(ExpressionIR * start_time, + 
GetArgAs(ast, args, "start_time")); + PX_ASSIGN_OR_RETURN(auto start_time_ns, + ParseAllTimeFormats(compiler_state->time_now().val, start_time)); + mem_source_op->SetTimeStartNS(start_time_ns); + } + if (!NoneObject::IsNoneObject(args.GetArg("end_time"))) { + PX_ASSIGN_OR_RETURN(ExpressionIR * end_time, GetArgAs(ast, args, "end_time")); + PX_ASSIGN_OR_RETURN(auto end_time_ns, + ParseAllTimeFormats(compiler_state->time_now().val, end_time)); + mem_source_op->SetTimeStopNS(end_time_ns); + } + return Dataframe::Create(compiler_state, mem_source_op, visitor); } - return Dataframe::Create(compiler_state, mem_source_op, visitor); } StatusOr> ProcessCols(IR* graph, const pypa::AstPtr& ast, QLObjectPtr obj, @@ -423,8 +452,8 @@ Status Dataframe::Init() { PX_ASSIGN_OR_RETURN( std::shared_ptr constructor_fn, FuncObject::Create( - name(), {"table", "select", "start_time", "end_time"}, - {{"select", "[]"}, {"start_time", "None"}, {"end_time", "None"}}, + name(), {"table", "select", "start_time", "end_time", "clickhouse"}, + {{"select", "[]"}, {"start_time", "None"}, {"end_time", "None"}, {"clickhouse", "False"}}, /* has_variable_len_args */ false, /* has_variable_len_kwargs */ false, std::bind(&DataFrameConstructor, compiler_state_, graph(), std::placeholders::_1, From 7ded9f8c378ad1a226e9fd60b6450c9e6a437178 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Fri, 3 Oct 2025 05:14:23 +0000 Subject: [PATCH 63/86] Get the carnot_executable version working e2e Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 22 +++++++++++++++++++++ src/carnot/exec/BUILD.bazel | 3 --- src/carnot/exec/clickhouse_source_node.cc | 24 ++++++++++++++++++----- 3 files changed, 41 insertions(+), 8 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 0c59dd41699..12468a53567 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -490,6 +490,28 @@ int main(int argc, char* argv[]) { LOG(FATAL) << 
absl::Substitute("Query failed to execute: $0", exec_status.msg()); } + // Get and log execution stats + auto exec_stats_or = result_server.exec_stats(); + if (exec_stats_or.ok()) { + auto exec_stats = exec_stats_or.ConsumeValueOrDie(); + if (exec_stats.has_execution_stats()) { + auto stats = exec_stats.execution_stats(); + LOG(INFO) << "Query Execution Stats:"; + LOG(INFO) << " Bytes processed: " << stats.bytes_processed(); + LOG(INFO) << " Records processed: " << stats.records_processed(); + if (stats.has_timing()) { + LOG(INFO) << " Execution time: " << stats.timing().execution_time_ns() << " ns"; + } + } + + for (const auto& agent_stats : exec_stats.agent_execution_stats()) { + LOG(INFO) << "Agent Execution Stats:"; + LOG(INFO) << " Execution time: " << agent_stats.execution_time_ns() << " ns"; + LOG(INFO) << " Bytes processed: " << agent_stats.bytes_processed(); + LOG(INFO) << " Records processed: " << agent_stats.records_processed(); + } + } + auto output_names = result_server.output_tables(); if (!output_names.size()) { LOG(FATAL) << "Query produced no output tables."; diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index 5dcc483ca05..5a21ac61558 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -306,8 +306,6 @@ pl_cc_test( pl_cc_test( name = "clickhouse_source_node_test", timeout = "long", - timeout = "long", - timeout = "long", srcs = ["clickhouse_source_node_test.cc"], data = [ "//src/stirling/source_connectors/socket_tracer/testing/container_images:clickhouse.tar", @@ -324,5 +322,4 @@ pl_cc_test( "//src/common/testing/test_utils:cc_library", "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", ], - timeout = "long", ) diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index 8cc2a8bab86..b9017200f85 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -302,7 +302,8 @@ StatusOr> 
ClickHouseSourceNode::ConvertClickHouseBlock PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); } else if (type_name == "DateTime") { auto typed_col = ch_column->As(); - arrow::Int64Builder builder; + arrow::Time64Builder builder(arrow::time64(arrow::TimeUnit::NANO), + arrow::default_memory_pool()); PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); for (size_t i = 0; i < num_rows; ++i) { @@ -317,7 +318,8 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock } else if (type_name.find("DateTime64") == 0) { auto typed_col = ch_column->As(); - arrow::Int64Builder builder; + arrow::Time64Builder builder(arrow::time64(arrow::TimeUnit::NANO), + arrow::default_memory_pool()); PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); for (size_t i = 0; i < num_rows; ++i) { @@ -551,7 +553,8 @@ Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { builder = std::make_shared(); break; case types::DataType::TIME64NS: - builder = std::make_shared(); + builder = std::make_shared(arrow::time64(arrow::TimeUnit::NANO), + arrow::default_memory_pool()); break; default: return error::InvalidArgument("Unsupported data type for column $0", col_idx); @@ -567,8 +570,7 @@ Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { // Append values from this block's array switch (data_type) { - case types::DataType::INT64: - case types::DataType::TIME64NS: { + case types::DataType::INT64: { auto typed_array = std::static_pointer_cast(array); auto typed_builder = std::static_pointer_cast(builder); for (int i = 0; i < typed_array->length(); i++) { @@ -580,6 +582,18 @@ Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { } break; } + case types::DataType::TIME64NS: { + auto typed_array = std::static_pointer_cast(array); + auto typed_builder = std::static_pointer_cast(builder); + for (int i = 0; i < typed_array->length(); i++) { + if (typed_array->IsNull(i)) { + PX_RETURN_IF_ERROR(typed_builder->AppendNull()); + } else { + 
typed_builder->UnsafeAppend(typed_array->Value(i)); + } + } + break; + } case types::DataType::FLOAT64: { auto typed_array = std::static_pointer_cast(array); auto typed_builder = std::static_pointer_cast(builder); From 456219bb15ffe8998bc2e3d8e0c399c7f0cf31cb Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Mon, 6 Oct 2025 03:54:00 +0000 Subject: [PATCH 64/86] Add clickhouse config to compiler and logical state protos Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 39 ++++++++++++++++++- src/carnot/exec/clickhouse_source_node.cc | 7 ++-- .../planner/compiler_state/compiler_state.h | 8 +++- src/carnot/planner/logical_planner.cc | 15 ++++++- src/carnot/planner/plannerpb/service.proto | 16 ++++++++ src/carnot/planpb/plan.proto | 16 ++++++++ 6 files changed, 93 insertions(+), 8 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 12468a53567..767344a2fd4 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -39,6 +39,10 @@ #include "src/shared/types/type_utils.h" #include "src/table_store/table_store.h" +// Example clickhouse test usage: +// The records inserted into clickhouse exist between -10m and -5m +// bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --query="import px;df = px.DataFrame('http_events', clickhouse=True, start_time='-10m', end_time='-9m'); px.display(df)" --output_file=$(pwd)/output.csv + DEFINE_string(input_file, gflags::StringFromEnv("INPUT_FILE", ""), "The csv containing data to run the query on."); @@ -330,7 +334,38 @@ void CreateHttpEventsTable(clickhouse::Client* client) { // Add sample rows std::time_t now = std::time(nullptr); - for (int i = 0; i < 10; ++i) { + LOG(INFO) << "Current time: " << now; + + // Get current hostname + char current_hostname[256]; + gethostname(current_hostname, sizeof(current_hostname)); + std::string hostname_str(current_hostname); + + // Add 5 records with the 
current hostname + for (int i = 0; i < 5; ++i) { + time_col->Append((now - 600 + i * 60) * 1000000000LL); // Convert to nanoseconds + local_addr_col->Append("127.0.0.1"); + local_port_col->Append(8080); + remote_addr_col->Append(absl::StrFormat("192.168.1.%d", 100 + i)); + remote_port_col->Append(50000 + i); + major_version_col->Append(1); + minor_version_col->Append(1); + content_type_col->Append(0); + req_headers_col->Append("Content-Type: application/json"); + req_method_col->Append(i % 2 == 0 ? "GET" : "POST"); + req_path_col->Append(absl::StrFormat("/api/v1/resource/%d", i)); + req_body_col->Append(i % 2 == 0 ? "" : "{\"data\": \"test\"}"); + resp_headers_col->Append("Content-Type: application/json"); + resp_status_col->Append(200); + resp_message_col->Append("OK"); + resp_body_col->Append("{\"result\": \"success\"}"); + resp_latency_ns_col->Append(1000000 + i * 100000); + hostname_col->Append(hostname_str); + event_time_col->Append((now - 600 + i * 60) * 1000LL); // Convert to milliseconds + } + + // Add 5 more records with different hostnames for testing + for (int i = 5; i < 10; ++i) { time_col->Append((now - 600 + i * 60) * 1000000000LL); // Convert to nanoseconds local_addr_col->Append("127.0.0.1"); local_port_col->Append(8080); @@ -348,7 +383,7 @@ void CreateHttpEventsTable(clickhouse::Client* client) { resp_message_col->Append("OK"); resp_body_col->Append("{\"result\": \"success\"}"); resp_latency_ns_col->Append(1000000 + i * 100000); - hostname_col->Append(absl::StrFormat("host-%d", i % 3)); + hostname_col->Append(absl::StrFormat("other-host-%d", i % 3)); event_time_col->Append((now - 600 + i * 60) * 1000LL); // Convert to milliseconds } diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index b9017200f85..885dc1eebfd 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -392,9 +392,10 @@ std::string ClickHouseSourceNode::BuildQuery() { // Add partition 
column filtering if specified if (!partition_column_.empty()) { - // TODO(ddelnano): For now, we assume the partition column filtering is handled by the base - // query In a real implementation, we might need to add specific partition filtering logic This - // could involve extracting partition values from the time range or other criteria + // Get the current hostname for partition filtering + char hostname[256]; + gethostname(hostname, sizeof(hostname)); + conditions.push_back(absl::Substitute("$0 = '$1'", partition_column_, hostname)); } // Parse the base query to find WHERE and ORDER BY positions diff --git a/src/carnot/planner/compiler_state/compiler_state.h b/src/carnot/planner/compiler_state/compiler_state.h index cd2e7902f0c..c25a14fe64d 100644 --- a/src/carnot/planner/compiler_state/compiler_state.h +++ b/src/carnot/planner/compiler_state/compiler_state.h @@ -119,7 +119,8 @@ class CompilerState : public NotCopyable { int64_t max_output_rows_per_table, std::string_view result_address, std::string_view result_ssl_targetname, const RedactionOptions& redaction_options, std::unique_ptr endpoint_config, - std::unique_ptr plugin_config, DebugInfo debug_info) + std::unique_ptr plugin_config, DebugInfo debug_info, + std::unique_ptr clickhouse_config = nullptr) : relation_map_(std::move(relation_map)), table_names_to_sensitive_columns_(table_names_to_sensitive_columns), registry_info_(registry_info), @@ -130,7 +131,8 @@ class CompilerState : public NotCopyable { redaction_options_(redaction_options), endpoint_config_(std::move(endpoint_config)), plugin_config_(std::move(plugin_config)), - debug_info_(std::move(debug_info)) {} + debug_info_(std::move(debug_info)), + clickhouse_config_(std::move(clickhouse_config)) {} CompilerState() = delete; @@ -175,6 +177,7 @@ class CompilerState : public NotCopyable { planpb::OTelEndpointConfig* endpoint_config() { return endpoint_config_.get(); } PluginConfig* plugin_config() { return plugin_config_.get(); } const DebugInfo& 
debug_info() { return debug_info_; } + planpb::ClickHouseConfig* clickhouse_config() { return clickhouse_config_.get(); } private: std::unique_ptr relation_map_; @@ -191,6 +194,7 @@ class CompilerState : public NotCopyable { std::unique_ptr endpoint_config_ = nullptr; std::unique_ptr plugin_config_ = nullptr; DebugInfo debug_info_; + std::unique_ptr clickhouse_config_ = nullptr; }; } // namespace planner diff --git a/src/carnot/planner/logical_planner.cc b/src/carnot/planner/logical_planner.cc index 19ed07104cf..c2ab8d53a9e 100644 --- a/src/carnot/planner/logical_planner.cc +++ b/src/carnot/planner/logical_planner.cc @@ -97,6 +97,18 @@ StatusOr> CreateCompilerState( for (const auto& debug_info_pb : logical_state.debug_info().otel_debug_attributes()) { debug_info.otel_debug_attrs.push_back({debug_info_pb.name(), debug_info_pb.value()}); } + + std::unique_ptr clickhouse_config = nullptr; + if (logical_state.has_clickhouse_config()) { + clickhouse_config = std::make_unique(); + clickhouse_config->set_hostname(logical_state.clickhouse_config().hostname()); + clickhouse_config->set_host(logical_state.clickhouse_config().host()); + clickhouse_config->set_port(logical_state.clickhouse_config().port()); + clickhouse_config->set_username(logical_state.clickhouse_config().username()); + clickhouse_config->set_password(logical_state.clickhouse_config().password()); + clickhouse_config->set_database(logical_state.clickhouse_config().database()); + } + // Create a CompilerState obj using the relation map and grabbing the current time. return std::make_unique( std::move(rel_map), sensitive_columns, registry_info, px::CurrentTimeNS(), @@ -105,7 +117,8 @@ StatusOr> CreateCompilerState( // TODO(philkuz) add an endpoint config to logical_state and pass that in here. RedactionOptionsFromPb(logical_state.redaction_options()), std::move(otel_endpoint_config), // TODO(philkuz) propagate the otel debug attributes here. 
- std::move(plugin_config), debug_info); + std::move(plugin_config), debug_info, + std::move(clickhouse_config)); } StatusOr> LogicalPlanner::Create(const udfspb::UDFInfo& udf_info) { diff --git a/src/carnot/planner/plannerpb/service.proto b/src/carnot/planner/plannerpb/service.proto index a9b33d825f8..4c3fd9a99a8 100644 --- a/src/carnot/planner/plannerpb/service.proto +++ b/src/carnot/planner/plannerpb/service.proto @@ -75,6 +75,22 @@ message Configs { int64 end_time_ns = 2; } PluginConfig plugin_config = 2; + // ClickHouseConfig contains information about ClickHouse connection parameters. + message ClickHouseConfig { + // The hostname of the node executing the query. + string hostname = 1; + // The ClickHouse server host. + string host = 2; + // The ClickHouse server port. + int32 port = 3; + // The ClickHouse username. + string username = 4; + // The ClickHouse password. + string password = 5; + // The ClickHouse database name. + string database = 6; + } + ClickHouseConfig clickhouse_config = 3; } // QueryRequest is the body of the request made to the planner. diff --git a/src/carnot/planpb/plan.proto b/src/carnot/planpb/plan.proto index 49849b7abbf..acc95bfaceb 100644 --- a/src/carnot/planpb/plan.proto +++ b/src/carnot/planpb/plan.proto @@ -565,6 +565,22 @@ message OTelEndpointConfig { int64 timeout = 4; } +// ClickHouseConfig contains the connection parameters for ClickHouse. +message ClickHouseConfig { + // The hostname of the node executing the query. + string hostname = 1; + // The ClickHouse server host. + string host = 2; + // The ClickHouse server port. + int32 port = 3; + // The ClickHouse username. + string username = 4; + // The ClickHouse password. + string password = 5; + // The ClickHouse database name. + string database = 6; +} + // Defines a resource. Discussed in depth in the OpenTelemetry spec. 
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md message OTelResource { From 0ad9cea78508e5fb4a131360e3face219eceb652 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Mon, 6 Oct 2025 05:51:17 +0000 Subject: [PATCH 65/86] Add clickhouse export support. IR and node tests passing Signed-off-by: Dom Del Nano --- src/carnot/exec/BUILD.bazel | 22 ++ .../exec/clickhouse_export_sink_node.cc | 166 +++++++++ src/carnot/exec/clickhouse_export_sink_node.h | 55 +++ .../exec/clickhouse_export_sink_node_test.cc | 351 ++++++++++++++++++ src/carnot/exec/exec_graph.cc | 5 + src/carnot/plan/operators.cc | 23 ++ src/carnot/plan/operators.h | 23 ++ src/carnot/plan/plan_fragment.cc | 3 + src/carnot/plan/plan_fragment.h | 7 + src/carnot/planner/ir/BUILD.bazel | 8 + src/carnot/planner/ir/all_ir_nodes.h | 1 + .../planner/ir/clickhouse_export_sink_ir.cc | 94 +++++ .../planner/ir/clickhouse_export_sink_ir.h | 75 ++++ .../ir/clickhouse_export_sink_ir_test.cc | 157 ++++++++ src/carnot/planner/ir/operators.inl | 1 + src/carnot/planner/objects/otel.cc | 33 ++ src/carnot/planner/objects/otel.h | 42 +++ src/carnot/planner/objects/qlobject.h | 1 + src/carnot/planpb/plan.proto | 21 ++ 19 files changed, 1088 insertions(+) create mode 100644 src/carnot/exec/clickhouse_export_sink_node.cc create mode 100644 src/carnot/exec/clickhouse_export_sink_node.h create mode 100644 src/carnot/exec/clickhouse_export_sink_node_test.cc create mode 100644 src/carnot/planner/ir/clickhouse_export_sink_ir.cc create mode 100644 src/carnot/planner/ir/clickhouse_export_sink_ir.h create mode 100644 src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index 5a21ac61558..625a6964d59 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -323,3 +323,25 @@ pl_cc_test( "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", ], ) + +pl_cc_test( + name = 
"clickhouse_export_sink_node_test", + timeout = "long", + srcs = ["clickhouse_export_sink_node_test.cc"], + data = [ + "//src/stirling/source_connectors/socket_tracer/testing/container_images:clickhouse.tar", + ], + tags = [ + "exclusive", + "requires_docker", + ], + deps = [ + ":cc_library", + ":exec_node_test_helpers", + ":test_utils", + "//src/carnot/plan:cc_library", + "//src/carnot/planpb:plan_pl_cc_proto", + "//src/common/testing/test_utils:cc_library", + "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", + ], +) diff --git a/src/carnot/exec/clickhouse_export_sink_node.cc b/src/carnot/exec/clickhouse_export_sink_node.cc new file mode 100644 index 00000000000..73b23e58be1 --- /dev/null +++ b/src/carnot/exec/clickhouse_export_sink_node.cc @@ -0,0 +1,166 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/exec/clickhouse_export_sink_node.h" + +#include +#include +#include + +#include +#include "glog/logging.h" +#include "src/carnot/planpb/plan.pb.h" +#include "src/common/base/macros.h" +#include "src/shared/types/typespb/types.pb.h" +#include "src/table_store/table_store.h" + +namespace px { +namespace carnot { +namespace exec { + +using table_store::schema::RowBatch; +using table_store::schema::RowDescriptor; + +std::string ClickHouseExportSinkNode::DebugStringImpl() { + return absl::Substitute("Exec::ClickHouseExportSinkNode: $0", plan_node_->DebugString()); +} + +Status ClickHouseExportSinkNode::InitImpl(const plan::Operator& plan_node) { + CHECK(plan_node.op_type() == planpb::OperatorType::CLICKHOUSE_EXPORT_SINK_OPERATOR); + if (input_descriptors_.size() != 1) { + return error::InvalidArgument( + "ClickHouse Export operator expects a single input relation, got $0", + input_descriptors_.size()); + } + + input_descriptor_ = std::make_unique(input_descriptors_[0]); + const auto* sink_plan_node = static_cast(&plan_node); + plan_node_ = std::make_unique(*sink_plan_node); + return Status::OK(); +} + +Status ClickHouseExportSinkNode::PrepareImpl(ExecState*) { return Status::OK(); } + +Status ClickHouseExportSinkNode::OpenImpl(ExecState* /*exec_state*/) { + // Connect to ClickHouse using config from plan node + const auto& config = plan_node_->clickhouse_config(); + + clickhouse::ClientOptions options; + options.SetHost(config.host()); + options.SetPort(config.port()); + options.SetUser(config.username()); + options.SetPassword(config.password()); + options.SetDefaultDatabase(config.database()); + + clickhouse_client_ = std::make_unique(options); + + return Status::OK(); +} + +Status ClickHouseExportSinkNode::CloseImpl(ExecState* exec_state) { + if (sent_eos_) { + return Status::OK(); + } + + LOG(INFO) << absl::Substitute( + "Closing ClickHouseExportSinkNode $0 in query $1 before receiving EOS", 
plan_node_->id(), + exec_state->query_id().str()); + + return Status::OK(); +} + +Status ClickHouseExportSinkNode::ConsumeNextImpl(ExecState* /*exec_state*/, const RowBatch& rb, + size_t /*parent_index*/) { + // Skip insertion if the batch is empty + if (rb.num_rows() == 0) { + if (rb.eos()) { + sent_eos_ = true; + } + return Status::OK(); + } + + // Build an INSERT query with the data from the row batch + clickhouse::Block block; + + // Create columns based on the column mappings + for (const auto& mapping : plan_node_->column_mappings()) { + auto arrow_col = rb.ColumnAt(mapping.input_column_index()); + int64_t num_rows = arrow_col->length(); + + // Create ClickHouse column based on data type + switch (mapping.column_type()) { + case types::INT64: { + auto col = std::make_shared(); + for (int64_t i = 0; i < num_rows; ++i) { + col->Append(types::GetValueFromArrowArray(arrow_col.get(), i)); + } + block.AppendColumn(mapping.clickhouse_column_name(), col); + break; + } + case types::FLOAT64: { + auto col = std::make_shared(); + for (int64_t i = 0; i < num_rows; ++i) { + col->Append(types::GetValueFromArrowArray(arrow_col.get(), i)); + } + block.AppendColumn(mapping.clickhouse_column_name(), col); + break; + } + case types::STRING: { + auto col = std::make_shared(); + for (int64_t i = 0; i < num_rows; ++i) { + col->Append(types::GetValueFromArrowArray(arrow_col.get(), i)); + } + block.AppendColumn(mapping.clickhouse_column_name(), col); + break; + } + case types::TIME64NS: { + auto col = std::make_shared(9); + for (int64_t i = 0; i < num_rows; ++i) { + int64_t ns_val = types::GetValueFromArrowArray(arrow_col.get(), i); + col->Append(ns_val); + } + block.AppendColumn(mapping.clickhouse_column_name(), col); + break; + } + case types::BOOLEAN: { + auto col = std::make_shared(); + for (int64_t i = 0; i < num_rows; ++i) { + col->Append(types::GetValueFromArrowArray(arrow_col.get(), i) ? 
1 : 0); + } + block.AppendColumn(mapping.clickhouse_column_name(), col); + break; + } + default: + return error::InvalidArgument("Unsupported data type for ClickHouse export: $0", + types::ToString(mapping.column_type())); + } + } + + // Insert the block into ClickHouse + clickhouse_client_->Insert(plan_node_->table_name(), block); + + if (rb.eos()) { + sent_eos_ = true; + } + + return Status::OK(); +} + +} // namespace exec +} // namespace carnot +} // namespace px diff --git a/src/carnot/exec/clickhouse_export_sink_node.h b/src/carnot/exec/clickhouse_export_sink_node.h new file mode 100644 index 00000000000..26478afe037 --- /dev/null +++ b/src/carnot/exec/clickhouse_export_sink_node.h @@ -0,0 +1,55 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ +#pragma once + +#include +#include +#include +#include + +#include "src/carnot/exec/exec_node.h" +#include "src/carnot/planpb/plan.pb.h" +#include "src/common/base/base.h" +#include "src/shared/types/types.h" + +namespace px { +namespace carnot { +namespace exec { + +class ClickHouseExportSinkNode : public SinkNode { + public: + virtual ~ClickHouseExportSinkNode() = default; + + protected: + std::string DebugStringImpl() override; + Status InitImpl(const plan::Operator& plan_node) override; + Status PrepareImpl(ExecState* exec_state) override; + Status OpenImpl(ExecState* exec_state) override; + Status CloseImpl(ExecState* exec_state) override; + Status ConsumeNextImpl(ExecState* exec_state, const table_store::schema::RowBatch& rb, + size_t parent_index) override; + + private: + std::unique_ptr input_descriptor_; + std::unique_ptr clickhouse_client_; + std::unique_ptr plan_node_; +}; + +} // namespace exec +} // namespace carnot +} // namespace px diff --git a/src/carnot/exec/clickhouse_export_sink_node_test.cc b/src/carnot/exec/clickhouse_export_sink_node_test.cc new file mode 100644 index 00000000000..baea2521108 --- /dev/null +++ b/src/carnot/exec/clickhouse_export_sink_node_test.cc @@ -0,0 +1,351 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/exec/clickhouse_export_sink_node.h" + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "src/carnot/exec/test_utils.h" +#include "src/carnot/plan/operators.h" +#include "src/carnot/planpb/plan.pb.h" +#include "src/carnot/udf/registry.h" +#include "src/common/testing/test_utils/container_runner.h" +#include "src/common/testing/testing.h" +#include "src/shared/types/arrow_adapter.h" +#include "src/shared/types/column_wrapper.h" +#include "src/shared/types/types.h" + +namespace px { +namespace carnot { +namespace exec { + +using table_store::schema::RowBatch; +using table_store::schema::RowDescriptor; +using ::testing::_; + +class ClickHouseExportSinkNodeTest : public ::testing::Test { + protected: + static constexpr char kClickHouseImage[] = + "src/stirling/source_connectors/socket_tracer/testing/container_images/clickhouse.tar"; + static constexpr char kClickHouseReadyMessage[] = "Ready for connections"; + static constexpr int kClickHousePort = 9000; + + void SetUp() override { + // Set up function registry and exec state + func_registry_ = std::make_unique("test_registry"); + auto table_store = std::make_shared(); + exec_state_ = std::make_unique( + func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, + MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + + // Start ClickHouse container + clickhouse_server_ = + std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), + "clickhouse_export_test", kClickHouseReadyMessage); + + std::vector options = { + absl::Substitute("--publish=$0:$0", kClickHousePort), + "--env=CLICKHOUSE_PASSWORD=test_password", + "--network=host", + }; + + ASSERT_OK(clickhouse_server_->Run(std::chrono::seconds{60}, options, {}, true, + std::chrono::seconds{300})); + + // Give ClickHouse time to initialize + 
std::this_thread::sleep_for(std::chrono::seconds(5)); + + // Create ClickHouse client for verification + SetupClickHouseClient(); + } + + void TearDown() override { + if (client_) { + client_.reset(); + } + } + + void SetupClickHouseClient() { + clickhouse::ClientOptions client_options; + client_options.SetHost("localhost"); + client_options.SetPort(kClickHousePort); + client_options.SetUser("default"); + client_options.SetPassword("test_password"); + client_options.SetDefaultDatabase("default"); + + const int kMaxRetries = 5; + for (int i = 0; i < kMaxRetries; ++i) { + LOG(INFO) << "Attempting to connect to ClickHouse (attempt " << (i + 1) << "/" << kMaxRetries + << ")..."; + try { + client_ = std::make_unique(client_options); + client_->Execute("SELECT 1"); + break; + } catch (const std::exception& e) { + LOG(WARNING) << "Failed to connect: " << e.what(); + if (i < kMaxRetries - 1) { + std::this_thread::sleep_for(std::chrono::seconds(2)); + } else { + throw; + } + } + } + } + + void CreateExportTable(const std::string& table_name) { + try { + client_->Execute(absl::Substitute("DROP TABLE IF EXISTS $0", table_name)); + + client_->Execute(absl::Substitute(R"( + CREATE TABLE $0 ( + time_ DateTime64(9), + hostname String, + count Int64, + latency Float64 + ) ENGINE = MergeTree() + ORDER BY time_ + )", table_name)); + + LOG(INFO) << "Export table created successfully: " << table_name; + } catch (const std::exception& e) { + LOG(ERROR) << "Failed to create export table: " << e.what(); + throw; + } + } + + std::vector> QueryTable(const std::string& query) { + std::vector> results; + + try { + client_->Select(query, [&](const clickhouse::Block& block) { + for (size_t row_idx = 0; row_idx < block.GetRowCount(); ++row_idx) { + std::vector row; + for (size_t col_idx = 0; col_idx < block.GetColumnCount(); ++col_idx) { + auto col = block[col_idx]; + std::string value; + + if (auto int_col = col->As()) { + value = std::to_string((*int_col)[row_idx]); + } else if (auto uint_col 
= col->As()) { + value = std::to_string((*uint_col)[row_idx]); + } else if (auto float_col = col->As()) { + value = std::to_string((*float_col)[row_idx]); + } else if (auto str_col = col->As()) { + value = (*str_col)[row_idx]; + } else if (auto dt_col = col->As()) { + value = std::to_string((*dt_col)[row_idx]); + } else { + value = ""; + } + + row.push_back(value); + } + results.push_back(row); + } + }); + } catch (const std::exception& e) { + LOG(ERROR) << "Failed to query table: " << e.what(); + throw; + } + + return results; + } + + std::unique_ptr CreatePlanNode( + const std::string& table_name) { + planpb::Operator op; + op.set_op_type(planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR); + auto* ch_op = op.mutable_clickhouse_sink_op(); + + auto* config = ch_op->mutable_clickhouse_config(); + config->set_host("localhost"); + config->set_port(kClickHousePort); + config->set_username("default"); + config->set_password("test_password"); + config->set_database("default"); + + ch_op->set_table_name(table_name); + + // Add column mappings + auto* mapping0 = ch_op->add_column_mappings(); + mapping0->set_input_column_index(0); + mapping0->set_clickhouse_column_name("time_"); + mapping0->set_column_type(types::TIME64NS); + + auto* mapping1 = ch_op->add_column_mappings(); + mapping1->set_input_column_index(1); + mapping1->set_clickhouse_column_name("hostname"); + mapping1->set_column_type(types::STRING); + + auto* mapping2 = ch_op->add_column_mappings(); + mapping2->set_input_column_index(2); + mapping2->set_clickhouse_column_name("count"); + mapping2->set_column_type(types::INT64); + + auto* mapping3 = ch_op->add_column_mappings(); + mapping3->set_input_column_index(3); + mapping3->set_clickhouse_column_name("latency"); + mapping3->set_column_type(types::FLOAT64); + + auto plan_node = std::make_unique(1); + EXPECT_OK(plan_node->Init(op.clickhouse_sink_op())); + + return plan_node; + } + + std::unique_ptr clickhouse_server_; + std::unique_ptr client_; + std::unique_ptr exec_state_; 
+ std::unique_ptr func_registry_; +}; + +TEST_F(ClickHouseExportSinkNodeTest, BasicExport) { + const std::string table_name = "export_test_basic"; + CreateExportTable(table_name); + + auto plan_node = CreatePlanNode(table_name); + + // Define input schema + RowDescriptor input_rd({types::TIME64NS, types::STRING, types::INT64, types::FLOAT64}); + + // Create node tester + auto tester = exec::ExecNodeTester( + *plan_node, RowDescriptor({}), {input_rd}, exec_state_.get()); + + // Create test data + auto rb1 = RowBatchBuilder(input_rd, 2, /*eow*/ false, /*eos*/ false) + .AddColumn({1000000000000000000LL, 2000000000000000000LL}) + .AddColumn({"host1", "host2"}) + .AddColumn({100, 200}) + .AddColumn({1.5, 2.5}) + .get(); + + auto rb2 = RowBatchBuilder(input_rd, 1, /*eow*/ true, /*eos*/ true) + .AddColumn({3000000000000000000LL}) + .AddColumn({"host3"}) + .AddColumn({300}) + .AddColumn({3.5}) + .get(); + + // Send data to sink + tester.ConsumeNext(rb1, 0, 0); + tester.ConsumeNext(rb2, 0, 0); + tester.Close(); + + // Verify data was inserted + auto results = QueryTable(absl::Substitute("SELECT hostname, count, latency FROM $0 ORDER BY time_", table_name)); + + ASSERT_EQ(results.size(), 3); + EXPECT_EQ(results[0][0], "host1"); + EXPECT_EQ(results[0][1], "100"); + EXPECT_THAT(results[0][2], ::testing::StartsWith("1.5")); + + EXPECT_EQ(results[1][0], "host2"); + EXPECT_EQ(results[1][1], "200"); + EXPECT_THAT(results[1][2], ::testing::StartsWith("2.5")); + + EXPECT_EQ(results[2][0], "host3"); + EXPECT_EQ(results[2][1], "300"); + EXPECT_THAT(results[2][2], ::testing::StartsWith("3.5")); +} + +TEST_F(ClickHouseExportSinkNodeTest, EmptyBatch) { + const std::string table_name = "export_test_empty"; + CreateExportTable(table_name); + + auto plan_node = CreatePlanNode(table_name); + + RowDescriptor input_rd({types::TIME64NS, types::STRING, types::INT64, types::FLOAT64}); + + auto tester = exec::ExecNodeTester( + *plan_node, RowDescriptor({}), {input_rd}, exec_state_.get()); + + // 
Send only EOS batch + auto rb = RowBatchBuilder(input_rd, 0, /*eow*/ true, /*eos*/ true) + .AddColumn({}) + .AddColumn({}) + .AddColumn({}) + .AddColumn({}) + .get(); + + tester.ConsumeNext(rb, 0, 0); + tester.Close(); + + // Verify no data was inserted + auto results = QueryTable(absl::Substitute("SELECT COUNT(*) FROM $0", table_name)); + + ASSERT_EQ(results.size(), 1); + EXPECT_EQ(results[0][0], "0"); +} + +TEST_F(ClickHouseExportSinkNodeTest, MultipleBatches) { + const std::string table_name = "export_test_multiple"; + CreateExportTable(table_name); + + auto plan_node = CreatePlanNode(table_name); + + RowDescriptor input_rd({types::TIME64NS, types::STRING, types::INT64, types::FLOAT64}); + + auto tester = exec::ExecNodeTester( + *plan_node, RowDescriptor({}), {input_rd}, exec_state_.get()); + + // Send multiple batches + for (int i = 0; i < 5; ++i) { + bool is_last = (i == 4); + auto rb = RowBatchBuilder(input_rd, 1, /*eow*/ is_last, /*eos*/ is_last) + .AddColumn({(i + 1) * 1000000000000000000LL}) + .AddColumn({absl::Substitute("host$0", i)}) + .AddColumn({i * 100}) + .AddColumn({i * 1.5}) + .get(); + + tester.ConsumeNext(rb, 0, 0); + } + + tester.Close(); + + // Verify all batches were inserted + auto results = QueryTable(absl::Substitute("SELECT COUNT(*) FROM $0", table_name)); + + ASSERT_EQ(results.size(), 1); + EXPECT_EQ(results[0][0], "5"); + + // Verify data order + auto ordered_results = QueryTable(absl::Substitute("SELECT hostname FROM $0 ORDER BY time_", table_name)); + + ASSERT_EQ(ordered_results.size(), 5); + for (int i = 0; i < 5; ++i) { + EXPECT_EQ(ordered_results[i][0], absl::Substitute("host$0", i)); + } +} + +} // namespace exec +} // namespace carnot +} // namespace px diff --git a/src/carnot/exec/exec_graph.cc b/src/carnot/exec/exec_graph.cc index 8b672d3e700..de38d762d7b 100644 --- a/src/carnot/exec/exec_graph.cc +++ b/src/carnot/exec/exec_graph.cc @@ -24,6 +24,7 @@ #include #include "src/carnot/exec/agg_node.h" +#include 
"src/carnot/exec/clickhouse_export_sink_node.h" #include "src/carnot/exec/clickhouse_source_node.h" #include "src/carnot/exec/empty_source_node.h" #include "src/carnot/exec/equijoin_node.h" @@ -113,6 +114,10 @@ Status ExecutionGraph::Init(table_store::schema::Schema* schema, plan::PlanState return OnOperatorImpl(node, &descriptors); }) + .OnClickHouseExportSink([&](auto& node) { + return OnOperatorImpl(node, + &descriptors); + }) .Walk(pf_); } diff --git a/src/carnot/plan/operators.cc b/src/carnot/plan/operators.cc index bf998e4b2a4..e841ab833af 100644 --- a/src/carnot/plan/operators.cc +++ b/src/carnot/plan/operators.cc @@ -85,6 +85,8 @@ std::unique_ptr Operator::FromProto(const planpb::Operator& pb, int64_ return CreateOperator(id, pb.empty_source_op()); case planpb::CLICKHOUSE_SOURCE_OPERATOR: return CreateOperator(id, pb.clickhouse_source_op()); + case planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR: + return CreateOperator(id, pb.clickhouse_sink_op()); case planpb::OTEL_EXPORT_SINK_OPERATOR: return CreateOperator(id, pb.otel_sink_op()); default: @@ -739,6 +741,27 @@ StatusOr ClickHouseSourceOperator::OutputRelation return r; } +/** + * ClickHouse Export Sink Operator Implementation. + */ + +Status ClickHouseExportSinkOperator::Init(const planpb::ClickHouseExportSinkOperator& pb) { + pb_ = pb; + is_initialized_ = true; + return Status::OK(); +} + +StatusOr ClickHouseExportSinkOperator::OutputRelation( + const table_store::schema::Schema&, const PlanState&, const std::vector&) const { + DCHECK(is_initialized_) << "Not initialized"; + // There are no outputs. + return table_store::schema::Relation(); +} + +std::string ClickHouseExportSinkOperator::DebugString() const { + return absl::Substitute("Op:ClickHouseExportSink(table=$0)", pb_.table_name()); +} + /** * OTel Export Sink Operator Implementation. 
*/ diff --git a/src/carnot/plan/operators.h b/src/carnot/plan/operators.h index e65a0deafc3..d77b5d6b18c 100644 --- a/src/carnot/plan/operators.h +++ b/src/carnot/plan/operators.h @@ -399,6 +399,29 @@ class ClickHouseSourceOperator : public Operator { planpb::ClickHouseSourceOperator pb_; }; +class ClickHouseExportSinkOperator : public Operator { + public: + explicit ClickHouseExportSinkOperator(int64_t id) + : Operator(id, planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR) {} + ~ClickHouseExportSinkOperator() override = default; + + StatusOr OutputRelation( + const table_store::schema::Schema& schema, const PlanState& state, + const std::vector& input_ids) const override; + Status Init(const planpb::ClickHouseExportSinkOperator& pb); + std::string DebugString() const override; + + const planpb::ClickHouseConfig& clickhouse_config() const { return pb_.clickhouse_config(); } + const std::string& table_name() const { return pb_.table_name(); } + const ::google::protobuf::RepeatedPtrField& + column_mappings() const { + return pb_.column_mappings(); + } + + private: + planpb::ClickHouseExportSinkOperator pb_; +}; + class OTelExportSinkOperator : public Operator { public: explicit OTelExportSinkOperator(int64_t id) : Operator(id, planpb::OTEL_EXPORT_SINK_OPERATOR) {} diff --git a/src/carnot/plan/plan_fragment.cc b/src/carnot/plan/plan_fragment.cc index 4ec6c3bf6ef..f9cbc8aa0e7 100644 --- a/src/carnot/plan/plan_fragment.cc +++ b/src/carnot/plan/plan_fragment.cc @@ -101,6 +101,9 @@ Status PlanFragmentWalker::CallWalkFn(const Operator& op) { case planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR: PX_RETURN_IF_ERROR(CallAs(on_clickhouse_source_walk_fn_, op)); break; + case planpb::OperatorType::CLICKHOUSE_EXPORT_SINK_OPERATOR: + PX_RETURN_IF_ERROR(CallAs(on_clickhouse_export_sink_walk_fn_, op)); + break; default: LOG(FATAL) << absl::Substitute("Operator does not exist: $0", magic_enum::enum_name(op_type)); return error::InvalidArgument("Operator does not exist: $0", 
magic_enum::enum_name(op_type)); diff --git a/src/carnot/plan/plan_fragment.h b/src/carnot/plan/plan_fragment.h index 6351132ac4c..f80090d9c30 100644 --- a/src/carnot/plan/plan_fragment.h +++ b/src/carnot/plan/plan_fragment.h @@ -77,6 +77,7 @@ class PlanFragmentWalker { using EmptySourceWalkFn = std::function; using OTelSinkWalkFn = std::function; using ClickHouseSourceWalkFn = std::function; + using ClickHouseExportSinkWalkFn = std::function; /** * Register callback for when a memory source operator is encountered. @@ -188,6 +189,11 @@ class PlanFragmentWalker { return *this; } + PlanFragmentWalker& OnClickHouseExportSink(const ClickHouseExportSinkWalkFn& fn) { + on_clickhouse_export_sink_walk_fn_ = fn; + return *this; + } + /** * Perform a walk of the plan fragment operators in a topologically-sorted order. * @param plan_fragment The plan fragment to walk. @@ -214,6 +220,7 @@ class PlanFragmentWalker { EmptySourceWalkFn on_empty_source_walk_fn_; OTelSinkWalkFn on_otel_sink_walk_fn_; ClickHouseSourceWalkFn on_clickhouse_source_walk_fn_; + ClickHouseExportSinkWalkFn on_clickhouse_export_sink_walk_fn_; }; } // namespace plan diff --git a/src/carnot/planner/ir/BUILD.bazel b/src/carnot/planner/ir/BUILD.bazel index 55b3ac401d4..3cb11930470 100644 --- a/src/carnot/planner/ir/BUILD.bazel +++ b/src/carnot/planner/ir/BUILD.bazel @@ -67,6 +67,14 @@ pl_cc_test( ], ) +pl_cc_test( + name = "clickhouse_export_sink_ir_test", + srcs = ["clickhouse_export_sink_ir_test.cc"], + deps = [ + "//src/carnot/planner/compiler:test_utils", + ], +) + pl_cc_test( name = "pattern_match_test", srcs = ["pattern_match_test.cc"], diff --git a/src/carnot/planner/ir/all_ir_nodes.h b/src/carnot/planner/ir/all_ir_nodes.h index 23b278d16d3..b5689d1389f 100644 --- a/src/carnot/planner/ir/all_ir_nodes.h +++ b/src/carnot/planner/ir/all_ir_nodes.h @@ -21,6 +21,7 @@ #include "src/carnot/planner/ir/blocking_agg_ir.h" #include "src/carnot/planner/ir/bool_ir.h" #include 
"src/carnot/planner/ir/clickhouse_source_ir.h" +#include "src/carnot/planner/ir/clickhouse_export_sink_ir.h" #include "src/carnot/planner/ir/column_ir.h" #include "src/carnot/planner/ir/data_ir.h" #include "src/carnot/planner/ir/drop_ir.h" diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir.cc b/src/carnot/planner/ir/clickhouse_export_sink_ir.cc new file mode 100644 index 00000000000..f3a10ea9556 --- /dev/null +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir.cc @@ -0,0 +1,94 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "src/carnot/planner/ir/clickhouse_export_sink_ir.h" +#include "src/carnot/planner/ir/ir.h" +#include "src/carnot/planpb/plan.pb.h" + +namespace px { +namespace carnot { +namespace planner { + +StatusOr>> +ClickHouseExportSinkIR::RequiredInputColumns() const { + return std::vector>{required_column_names_}; +} + +Status ClickHouseExportSinkIR::ToProto(planpb::Operator* op) const { + op->set_op_type(planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR); + auto clickhouse_op = op->mutable_clickhouse_sink_op(); + + // ClickHouse config must be set before calling ToProto + if (clickhouse_config_ == nullptr) { + return error::InvalidArgument("ClickHouse config not set"); + } + + // Set the ClickHouse configuration + *clickhouse_op->mutable_clickhouse_config() = *clickhouse_config_; + clickhouse_op->set_table_name(table_name_); + + // Map all input columns to ClickHouse columns + DCHECK_EQ(1U, parent_types().size()); + auto parent_table_type = std::static_pointer_cast(parent_types()[0]); + + for (const auto& [idx, col_name] : Enumerate(parent_table_type->ColumnNames())) { + auto column_mapping = clickhouse_op->add_column_mappings(); + column_mapping->set_input_column_index(idx); + column_mapping->set_clickhouse_column_name(col_name); + + PX_ASSIGN_OR_RETURN(auto col_type, parent_table_type->GetColumnType(col_name)); + auto value_type = std::static_pointer_cast(col_type); + column_mapping->set_column_type(value_type->data_type()); + } + + return Status::OK(); +} + +Status ClickHouseExportSinkIR::CopyFromNodeImpl( + const IRNode* node, absl::flat_hash_map*) { + const ClickHouseExportSinkIR* source = static_cast(node); + table_name_ = source->table_name_; + required_column_names_ = source->required_column_names_; + if (source->clickhouse_config_ != nullptr) { + clickhouse_config_ = std::make_unique(*source->clickhouse_config_); + } + return Status::OK(); +} + +Status ClickHouseExportSinkIR::ResolveType(CompilerState* 
compiler_state) { + DCHECK_EQ(1U, parent_types().size()); + + auto parent_table_type = std::static_pointer_cast(parent_types()[0]); + + // Store ClickHouse config from compiler state + if (compiler_state->clickhouse_config() != nullptr) { + clickhouse_config_ = std::make_unique(*compiler_state->clickhouse_config()); + } + + // Populate required column names + for (const auto& col_name : parent_table_type->ColumnNames()) { + required_column_names_.insert(col_name); + } + + // Export sink passes through the input schema + return SetResolvedType(parent_table_type); +} + +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir.h b/src/carnot/planner/ir/clickhouse_export_sink_ir.h new file mode 100644 index 00000000000..9864113832a --- /dev/null +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir.h @@ -0,0 +1,75 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include +#include + +#include +#include "src/carnot/planner/compiler_state/compiler_state.h" +#include "src/carnot/planner/ir/column_ir.h" +#include "src/carnot/planner/ir/operator_ir.h" +#include "src/carnot/planpb/plan.pb.h" +#include "src/common/base/base.h" + +namespace px { +namespace carnot { +namespace planner { + +/** + * @brief The IR representation for the ClickHouseExportSink operator. 
+ * Represents a configuration to export a DataFrame to a ClickHouse database. + */ +class ClickHouseExportSinkIR : public OperatorIR { + public: + explicit ClickHouseExportSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kClickHouseExportSink) {} + + Status Init(OperatorIR* parent, const std::string& table_name) { + table_name_ = table_name; + return AddParent(parent); + } + + Status ToProto(planpb::Operator* op) const override; + + Status CopyFromNodeImpl(const IRNode* node, + absl::flat_hash_map*) override; + + Status ResolveType(CompilerState* compiler_state); + inline bool IsBlocking() const override { return true; } + + StatusOr>> RequiredInputColumns() const override; + + const std::string& table_name() const { return table_name_; } + + protected: + StatusOr> PruneOutputColumnsToImpl( + const absl::flat_hash_set& /*kept_columns*/) override { + return error::Unimplemented("Unexpected call to ClickHouseExportSinkIR::PruneOutputColumnsTo."); + } + + private: + std::string table_name_; + absl::flat_hash_set required_column_names_; + std::unique_ptr clickhouse_config_; +}; + +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc b/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc new file mode 100644 index 00000000000..6b45dfa9820 --- /dev/null +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc @@ -0,0 +1,157 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include +#include +#include + +#include "src/carnot/planner/compiler/test_utils.h" +#include "src/carnot/planner/ir/clickhouse_export_sink_ir.h" +#include "src/carnot/planner/ir/memory_source_ir.h" +#include "src/carnot/planpb/plan.pb.h" +#include "src/common/testing/protobuf.h" +#include "src/table_store/table_store.h" + +namespace px { +namespace carnot { +namespace planner { + +using ClickHouseExportSinkTest = ASTVisitorTest; + +TEST_F(ClickHouseExportSinkTest, basic_export) { + // Create a simple relation with some columns + table_store::schema::Relation relation{ + {types::TIME64NS, types::STRING, types::INT64, types::FLOAT64}, + {"time_", "hostname", "count", "latency"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_DURATION_NS}}; + + (*compiler_state_->relation_map())["table"] = relation; + + auto src = MakeMemSource("table"); + EXPECT_OK(src->ResolveType(compiler_state_.get())); + + ASSERT_OK_AND_ASSIGN(auto clickhouse_sink, + graph->CreateNode(src->ast(), src, "http_events")); + + clickhouse_sink->PullParentTypes(); + EXPECT_OK(clickhouse_sink->UpdateOpAfterParentTypesResolved()); + + // ResolveType will try to get config from compiler state, but we'll set it directly + // by creating a new CompilerState with ClickHouse config + auto new_relation_map = std::make_unique(); + (*new_relation_map)["table"] = relation; + + auto clickhouse_config = std::make_unique(); + clickhouse_config->set_host("localhost"); + clickhouse_config->set_port(9000); + clickhouse_config->set_username("default"); + clickhouse_config->set_password("test_password"); + clickhouse_config->set_database("default"); + + auto new_compiler_state = std::make_unique( + std::move(new_relation_map), + SensitiveColumnMap{}, + compiler_state_->registry_info(), + compiler_state_->time_now(), + 
0, // max_output_rows_per_table + "", // result_address + "", // result_ssl_targetname + RedactionOptions{}, + nullptr, // endpoint_config + nullptr, // plugin_config + DebugInfo{}, + std::move(clickhouse_config)); + + // ResolveType will copy the config from compiler state + EXPECT_OK(clickhouse_sink->ResolveType(new_compiler_state.get())); + + planpb::Operator pb; + EXPECT_OK(clickhouse_sink->ToProto(&pb)); + + EXPECT_EQ(pb.op_type(), planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR); + EXPECT_EQ(pb.clickhouse_sink_op().table_name(), "http_events"); + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings_size(), 4); + + // Verify column mappings + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings(0).input_column_index(), 0); + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings(0).clickhouse_column_name(), "time_"); + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings(0).column_type(), types::TIME64NS); + + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings(1).input_column_index(), 1); + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings(1).clickhouse_column_name(), "hostname"); + EXPECT_EQ(pb.clickhouse_sink_op().column_mappings(1).column_type(), types::STRING); +} + +TEST_F(ClickHouseExportSinkTest, required_input_columns) { + table_store::schema::Relation relation{ + {types::TIME64NS, types::STRING, types::INT64}, + {"time_", "hostname", "count"}, + {types::ST_NONE, types::ST_NONE, types::ST_NONE}}; + + (*compiler_state_->relation_map())["table"] = relation; + + auto src = MakeMemSource("table"); + EXPECT_OK(src->ResolveType(compiler_state_.get())); + + ASSERT_OK_AND_ASSIGN(auto clickhouse_sink, + graph->CreateNode(src->ast(), src, "http_events")); + + clickhouse_sink->PullParentTypes(); + EXPECT_OK(clickhouse_sink->UpdateOpAfterParentTypesResolved()); + + // Need to call ResolveType to populate required_column_names_ + auto clickhouse_config = std::make_unique(); + clickhouse_config->set_host("localhost"); + clickhouse_config->set_port(9000); + 
clickhouse_config->set_username("default"); + clickhouse_config->set_password("test_password"); + clickhouse_config->set_database("default"); + + auto new_relation_map = std::make_unique(); + (*new_relation_map)["table"] = relation; + + auto new_compiler_state = std::make_unique( + std::move(new_relation_map), + SensitiveColumnMap{}, + compiler_state_->registry_info(), + compiler_state_->time_now(), + 0, + "", + "", + RedactionOptions{}, + nullptr, + nullptr, + DebugInfo{}, + std::move(clickhouse_config)); + + EXPECT_OK(clickhouse_sink->ResolveType(new_compiler_state.get())); + + ASSERT_OK_AND_ASSIGN(auto required_input_columns, clickhouse_sink->RequiredInputColumns()); + ASSERT_EQ(required_input_columns.size(), 1); + EXPECT_THAT(required_input_columns[0], + ::testing::UnorderedElementsAre("time_", "hostname", "count")); +} + + +} // namespace planner +} // namespace carnot +} // namespace px diff --git a/src/carnot/planner/ir/operators.inl b/src/carnot/planner/ir/operators.inl index 9ace7fd94e6..bb712c71c11 100644 --- a/src/carnot/planner/ir/operators.inl +++ b/src/carnot/planner/ir/operators.inl @@ -38,5 +38,6 @@ PX_CARNOT_IR_NODE(Stream) PX_CARNOT_IR_NODE(EmptySource) PX_CARNOT_IR_NODE(OTelExportSink) PX_CARNOT_IR_NODE(ClickHouseSource) +PX_CARNOT_IR_NODE(ClickHouseExportSink) #endif diff --git a/src/carnot/planner/objects/otel.cc b/src/carnot/planner/objects/otel.cc index 7f79d6196bb..7e96bb8fdcc 100644 --- a/src/carnot/planner/objects/otel.cc +++ b/src/carnot/planner/objects/otel.cc @@ -24,6 +24,7 @@ #include #include +#include "src/carnot/planner/ir/clickhouse_export_sink_ir.h" #include "src/carnot/planner/ir/otel_export_sink_ir.h" #include "src/carnot/planner/objects/dataframe.h" #include "src/carnot/planner/objects/dict_object.h" @@ -70,6 +71,11 @@ Status ExportToOTel(const OTelData& data, const pypa::AstPtr& ast, Dataframe* df return op->graph()->CreateNode(ast, op, data).status(); } +Status ExportToClickHouse(const std::string& table_name, const 
pypa::AstPtr& ast, Dataframe* df) { + auto op = df->op(); + return op->graph()->CreateNode(ast, op, table_name).status(); +} + StatusOr GetArgAsString(const pypa::AstPtr& ast, const ParsedArgs& args, std::string_view arg_name) { PX_ASSIGN_OR_RETURN(StringIR * arg_ir, GetArgAs(ast, args, arg_name)); @@ -100,6 +106,22 @@ StatusOr> OTelDataContainer::Create( return std::shared_ptr(new OTelDataContainer(ast_visitor, std::move(data))); } +StatusOr> ClickHouseRows::Create( + ASTVisitor* ast_visitor, const std::string& table_name) { + return std::shared_ptr(new ClickHouseRows(ast_visitor, table_name)); +} + +StatusOr ClickHouseRowsDefinition(const pypa::AstPtr& ast, const ParsedArgs& args, + ASTVisitor* visitor) { + PX_ASSIGN_OR_RETURN(StringIR* table_name_ir, GetArgAs(ast, args, "table")); + std::string table_name = table_name_ir->str(); + + return Exporter::Create(visitor, [table_name](auto&& ast_arg, auto&& df) -> Status { + return ExportToClickHouse(table_name, std::forward(ast_arg), + std::forward(df)); + }); +} + StatusOr> ParseAttributes(DictObject* attributes) { auto values = attributes->values(); auto keys = attributes->keys(); @@ -339,6 +361,17 @@ Status OTelModule::Init(CompilerState* compiler_state, IR* ir) { AddMethod(kEndpointOpID, endpoint_fn); PX_RETURN_IF_ERROR(endpoint_fn->SetDocString(kEndpointOpDocstring)); + PX_ASSIGN_OR_RETURN( + std::shared_ptr clickhouse_rows_fn, + FuncObject::Create(kClickHouseRowsOpID, {"table"}, {}, + /* has_variable_len_args */ false, + /* has_variable_len_kwargs */ false, + std::bind(&ClickHouseRowsDefinition, std::placeholders::_1, + std::placeholders::_2, std::placeholders::_3), + ast_visitor())); + AddMethod(kClickHouseRowsOpID, clickhouse_rows_fn); + PX_RETURN_IF_ERROR(clickhouse_rows_fn->SetDocString(kClickHouseRowsOpDocstring)); + return Status::OK(); } diff --git a/src/carnot/planner/objects/otel.h b/src/carnot/planner/objects/otel.h index 5f4c1d19eb7..7019d58ffff 100644 --- a/src/carnot/planner/objects/otel.h +++ 
b/src/carnot/planner/objects/otel.h @@ -87,6 +87,24 @@ class OTelModule : public QLObject { timeout (int, optional): The number of seconds before the request should timeout when exporting to the OTel collector. )doc"; + inline static constexpr char kClickHouseRowsOpID[] = "ClickHouseRows"; + inline static constexpr char kClickHouseRowsOpDocstring[] = R"doc( + Specifies a ClickHouse table to export DataFrame rows to. + + Describes the table name in ClickHouse where columnar DataFrame data will be + inserted. All columns from the DataFrame will be mapped to corresponding + columns in the ClickHouse table. Passed as the data argument to `px.export`. + + :topic: clickhouse + + Args: + table (string): The name of the ClickHouse table to insert data into. + + Returns: + ClickHouseRows: Configuration for exporting DataFrame data to ClickHouse. + Can be passed to `px.export`. + )doc"; + protected: explicit OTelModule(ASTVisitor* ast_visitor) : QLObject(OTelModuleType, ast_visitor) {} Status Init(CompilerState* compiler_state, IR* ir); @@ -269,6 +287,30 @@ class OTelDataContainer : public QLObject { std::variant data_; }; +class ClickHouseRows : public QLObject { + public: + static constexpr TypeDescriptor ClickHouseRowsType = { + /* name */ "ClickHouseRows", + /* type */ QLObjectType::kClickHouseRows, + }; + + static StatusOr> Create( + ASTVisitor* ast_visitor, const std::string& table_name); + + static bool IsClickHouseRows(const QLObjectPtr& obj) { + return obj->type() == ClickHouseRowsType.type(); + } + + const std::string& table_name() const { return table_name_; } + + protected: + ClickHouseRows(ASTVisitor* ast_visitor, std::string table_name) + : QLObject(ClickHouseRowsType, ast_visitor), table_name_(std::move(table_name)) {} + + private: + std::string table_name_; +}; + } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/objects/qlobject.h b/src/carnot/planner/objects/qlobject.h index 4231fb78b0e..0ebf03da257 100644 
--- a/src/carnot/planner/objects/qlobject.h +++ b/src/carnot/planner/objects/qlobject.h @@ -66,6 +66,7 @@ enum class QLObjectType { kExporter, kOTelEndpoint, kOTelDataContainer, + kClickHouseRows, }; std::string QLObjectTypeString(QLObjectType type); diff --git a/src/carnot/planpb/plan.proto b/src/carnot/planpb/plan.proto index acc95bfaceb..738b4793c08 100644 --- a/src/carnot/planpb/plan.proto +++ b/src/carnot/planpb/plan.proto @@ -116,6 +116,7 @@ enum OperatorType { MEMORY_SINK_OPERATOR = 9000; GRPC_SINK_OPERATOR = 9100; OTEL_EXPORT_SINK_OPERATOR = 9200; + CLICKHOUSE_EXPORT_SINK_OPERATOR = 9300; } // The Logical operation performed. Each operator needs and entry in this @@ -152,6 +153,8 @@ message Operator { OTelExportSinkOperator otel_sink_op = 14 [ (gogoproto.customname) = "OTelSinkOp" ]; // ClickHouseSourceOperator reads data from a ClickHouse database. ClickHouseSourceOperator clickhouse_source_op = 15; + // ClickHouseExportSinkOperator writes the input table to a ClickHouse database. + ClickHouseExportSinkOperator clickhouse_sink_op = 16; } } @@ -604,6 +607,24 @@ message OTelExportSinkOperator { repeated OTelLog logs = 5; } +message ClickHouseExportSinkOperator { + // ClickHouse connection parameters + ClickHouseConfig clickhouse_config = 1; + // Target table name to insert data into + string table_name = 2; + // Column mapping: maps input column indices to ClickHouse table columns + repeated ColumnMapping column_mappings = 3; + + message ColumnMapping { + // Index of the column in the input row batch + int32 input_column_index = 1; + // Name of the column in the ClickHouse table + string clickhouse_column_name = 2; + // Data type of the column + px.types.DataType column_type = 3; + } +} + // Scalar expression is any single valued expression. 
message ScalarExpression { oneof value { From 52086178bd92102c984ba6662cbed77fedcd28c8 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Mon, 20 Oct 2025 23:31:05 +0000 Subject: [PATCH 66/86] Add px.CreateClickHouseSchemas UDTF, integrate in standalone_pem and carnot_executable Signed-off-by: Dom Del Nano --- src/carnot/BUILD.bazel | 6 + src/carnot/carnot_executable.cc | 20 ++ src/experimental/standalone_pem/BUILD.bazel | 1 + .../standalone_pem/standalone_pem_manager.cc | 4 +- .../standalone_pem/standalone_pem_manager.h | 4 + src/shared/version/BUILD.bazel | 1 + src/vizier/funcs/md_udtfs/BUILD.bazel | 1 + src/vizier/funcs/md_udtfs/md_udtfs.cc | 3 + src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 243 ++++++++++++++++++ .../services/metadata/local/BUILD.bazel | 33 +++ .../metadata/local/local_metadata_service.h | 160 ++++++++++++ .../services/metadata/metadatapb/BUILD.bazel | 12 +- 12 files changed, 485 insertions(+), 3 deletions(-) create mode 100644 src/vizier/services/metadata/local/BUILD.bazel create mode 100644 src/vizier/services/metadata/local/local_metadata_service.h diff --git a/src/carnot/BUILD.bazel b/src/carnot/BUILD.bazel index a796db3363c..2f58957d210 100644 --- a/src/carnot/BUILD.bazel +++ b/src/carnot/BUILD.bazel @@ -101,6 +101,7 @@ pl_cc_binary( data = [ "//src/stirling/source_connectors/socket_tracer/testing/container_images:clickhouse.tar", ], + stamp = -1, tags = [ "requires_docker", ], @@ -108,6 +109,11 @@ pl_cc_binary( ":cc_library", "//src/common/testing:cc_library", "//src/common/testing/test_utils:cc_library", + "//src/shared/version:cc_library", + "//src/shared/version:version_linkstamp", + "//src/vizier/funcs:cc_library", + "//src/vizier/funcs/context:cc_library", + "//src/vizier/services/metadata/local:cc_library", "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", ], ) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 767344a2fd4..670e3e9b1bf 100644 --- a/src/carnot/carnot_executable.cc +++ 
b/src/carnot/carnot_executable.cc @@ -38,6 +38,9 @@ #include "src/shared/types/column_wrapper.h" #include "src/shared/types/type_utils.h" #include "src/table_store/table_store.h" +#include "src/vizier/funcs/context/vizier_context.h" +#include "src/vizier/funcs/funcs.h" +#include "src/vizier/services/metadata/local/local_metadata_service.h" // Example clickhouse test usage: // The records inserted into clickhouse exist between -10m and -5m @@ -467,8 +470,25 @@ int main(int argc, char* argv[]) { // Execute query. auto table_store = std::make_shared(); auto result_server = px::carnot::exec::LocalGRPCResultSinkServer(); + + // Create metadata service stub for table schemas + auto metadata_grpc_server = std::make_unique(table_store.get()); + + // Create vizier func factory context with metadata stub + px::vizier::funcs::VizierFuncFactoryContext func_context( + nullptr, // agent_manager + metadata_grpc_server->StubGenerator(), // mds_stub + nullptr, // mdtp_stub + nullptr, // cronscript_stub + table_store, + [](grpc::ClientContext*) {} // add_grpc_auth + ); + auto func_registry = std::make_unique("default_registry"); + // Register both carnot and vizier functions px::carnot::funcs::RegisterFuncsOrDie(func_registry.get()); + px::vizier::funcs::RegisterFuncsOrDie(func_context, func_registry.get()); + auto clients_config = std::make_unique(px::carnot::Carnot::ClientsConfig{ [&result_server](const std::string& address, const std::string&) { diff --git a/src/experimental/standalone_pem/BUILD.bazel b/src/experimental/standalone_pem/BUILD.bazel index d7ebafcf122..189842536ac 100644 --- a/src/experimental/standalone_pem/BUILD.bazel +++ b/src/experimental/standalone_pem/BUILD.bazel @@ -50,6 +50,7 @@ pl_cc_library( "//src/vizier/funcs:cc_library", "//src/vizier/funcs/context:cc_library", "//src/vizier/services/agent/shared/base:cc_library", + "//src/vizier/services/metadata/local:cc_library", "@com_github_grpc_grpc//:grpc++", ], ) diff --git 
a/src/experimental/standalone_pem/standalone_pem_manager.cc b/src/experimental/standalone_pem/standalone_pem_manager.cc index d1257dbdbfd..323729d443c 100644 --- a/src/experimental/standalone_pem/standalone_pem_manager.cc +++ b/src/experimental/standalone_pem/standalone_pem_manager.cc @@ -27,6 +27,7 @@ #include "src/shared/schema/utils.h" #include "src/table_store/table_store.h" #include "src/vizier/funcs/funcs.h" +#include "src/vizier/services/metadata/local/local_metadata_service.h" DEFINE_int32( table_store_data_limit, gflags::Int32FromEnv("PL_TABLE_STORE_DATA_LIMIT_MB", 1024 + 256), @@ -72,7 +73,8 @@ StandalonePEMManager::StandalonePEMManager(sole::uuid agent_id, std::string_view api_(std::make_unique(time_system_.get())), dispatcher_(api_->AllocateDispatcher("manager")), table_store_(std::make_shared()), - func_context_(this, /* mds_stub= */ nullptr, /* mdtp_stub= */ nullptr, + metadata_grpc_server_(std::make_unique(table_store_.get())), + func_context_(this, metadata_grpc_server_->StubGenerator(), /* mdtp_stub= */ nullptr, /* cronscript_stub= */ nullptr, table_store_, [](grpc::ClientContext*) {}), stirling_(px::stirling::Stirling::Create(px::stirling::CreateSourceRegistryFromFlag())), results_sink_server_(std::make_unique()) { diff --git a/src/experimental/standalone_pem/standalone_pem_manager.h b/src/experimental/standalone_pem/standalone_pem_manager.h index 9d658b1306a..bb56d29cac0 100644 --- a/src/experimental/standalone_pem/standalone_pem_manager.h +++ b/src/experimental/standalone_pem/standalone_pem_manager.h @@ -31,6 +31,7 @@ #include "src/vizier/funcs/context/vizier_context.h" #include "src/vizier/services/agent/shared/base/base_manager.h" #include "src/vizier/services/agent/shared/base/info.h" +#include "src/vizier/services/metadata/local/local_metadata_service.h" namespace px { namespace vizier { @@ -72,6 +73,9 @@ class StandalonePEMManager : public BaseManager { std::shared_ptr table_store_; + // Metadata gRPC server must be initialized before 
func_context_ + std::unique_ptr metadata_grpc_server_; + // Factory context for vizier functions. funcs::VizierFuncFactoryContext func_context_; diff --git a/src/shared/version/BUILD.bazel b/src/shared/version/BUILD.bazel index a94f6553cec..835730c7f4c 100644 --- a/src/shared/version/BUILD.bazel +++ b/src/shared/version/BUILD.bazel @@ -77,6 +77,7 @@ pl_cc_library_internal( # be restricted to binaries. # TODO(zasgar): Refactor dependent code so we can more precisely apply the visbility rules. visibility = [ + "//src/carnot:__pkg__", "//src/carnot/planner/docs:__pkg__", "//src/experimental:__subpackages__", "//src/vizier/funcs:__pkg__", diff --git a/src/vizier/funcs/md_udtfs/BUILD.bazel b/src/vizier/funcs/md_udtfs/BUILD.bazel index c5a966b5ca6..161ec9c3fe5 100644 --- a/src/vizier/funcs/md_udtfs/BUILD.bazel +++ b/src/vizier/funcs/md_udtfs/BUILD.bazel @@ -47,6 +47,7 @@ pl_cc_library( "//src/vizier/services/agent/shared/manager:cc_headers", "//src/vizier/services/metadata/metadatapb:service_pl_cc_proto", "@com_github_arun11299_cpp_jwt//:cpp_jwt", + "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", "@com_github_grpc_grpc//:grpc++", ], ) diff --git a/src/vizier/funcs/md_udtfs/md_udtfs.cc b/src/vizier/funcs/md_udtfs/md_udtfs.cc index 193c6d45dff..ec6f8926e80 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs.cc +++ b/src/vizier/funcs/md_udtfs/md_udtfs.cc @@ -58,6 +58,9 @@ void RegisterFuncsOrDie(const VizierFuncFactoryContext& ctx, carnot::udf::Regist registry ->RegisterFactoryOrDie>( "GetCronScriptHistory", ctx); + registry->RegisterFactoryOrDie>( + "CreateClickHouseSchemas", ctx); } } // namespace md diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index e48dd4ce790..02a5ba60f94 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -28,6 +28,7 @@ #include #include +#include #include #include @@ -1073,6 +1074,248 @@ class GetCronScriptHistory final : public 
carnot::udf::UDTF add_context_authentication_func_; }; +namespace clickhouse_schema { + +/** + * Maps Pixie DataType to ClickHouse type string. + * Based on the mapping used in carnot_executable.cc for http_events table. + */ +inline std::string PixieTypeToClickHouseType(types::DataType pixie_type, + const std::string& column_name) { + switch (pixie_type) { + case types::DataType::INT64: + return "Int64"; + case types::DataType::FLOAT64: + return "Float64"; + case types::DataType::STRING: + return "String"; + case types::DataType::BOOLEAN: + return "UInt8"; + case types::DataType::TIME64NS: + // Use DateTime64(9) for time_ column (nanoseconds) + // Use DateTime64(3) for event_time column (milliseconds) + if (column_name == "time_") { + return "DateTime64(9)"; + } else if (column_name == "event_time") { + return "DateTime64(3)"; + } + // Default to DateTime64(9) for other time columns + return "DateTime64(9)"; + case types::DataType::UINT128: + // ClickHouse doesn't have native UINT128, use String representation + return "String"; + default: + return "String"; // Fallback to String for unsupported types + } +} + +} // namespace clickhouse_schema + +/** + * This UDTF creates ClickHouse schemas from Pixie DataTable schemas. + * It fetches table schemas from MDS and creates corresponding tables in ClickHouse. 
+ */ +class CreateClickHouseSchemas final : public carnot::udf::UDTF { + public: + using MDSStub = vizier::services::metadata::MetadataService::Stub; + using SchemaResponse = vizier::services::metadata::SchemaResponse; + + CreateClickHouseSchemas() = delete; + CreateClickHouseSchemas(std::shared_ptr stub, + std::function add_context_authentication) + : idx_(0), stub_(stub), add_context_authentication_func_(add_context_authentication) {} + + static constexpr auto Executor() { return carnot::udfspb::UDTFSourceExecutor::UDTF_ONE_KELVIN; } + + static constexpr auto OutputRelation() { + return MakeArray(ColInfo("table_name", types::DataType::STRING, types::PatternType::GENERAL, + "The name of the table"), + ColInfo("status", types::DataType::STRING, types::PatternType::GENERAL, + "Status of the table creation (success/error)"), + ColInfo("message", types::DataType::STRING, types::PatternType::GENERAL, + "Additional information or error message")); + } + + static constexpr auto InitArgs() { + return MakeArray( + UDTFArg::Make("host", "ClickHouse server host", "localhost"), + UDTFArg::Make("port", "ClickHouse server port", 9000), + UDTFArg::Make("username", "ClickHouse username", "default"), + UDTFArg::Make("password", "ClickHouse password", ""), + UDTFArg::Make("database", "ClickHouse database", "default"), + UDTFArg::Make( + "table_filter", "Optional table name filter (empty for all tables)", "")); + } + + Status Init(FunctionContext*, types::StringValue host, types::Int64Value port, + types::StringValue username, types::StringValue password, + types::StringValue database, types::StringValue table_filter) { + // Store ClickHouse connection parameters + host_ = std::string(host); + port_ = port.val; + username_ = std::string(username); + password_ = std::string(password); + database_ = std::string(database); + table_filter_ = std::string(table_filter); + + // Fetch schemas from MDS + px::vizier::services::metadata::SchemaRequest req; + 
px::vizier::services::metadata::SchemaResponse resp; + + grpc::ClientContext ctx; + add_context_authentication_func_(&ctx); + auto s = stub_->GetSchemas(&ctx, req, &resp); + if (!s.ok()) { + return error::Internal("Failed to make RPC call to metadata service: $0", + s.error_message()); + } + + // Connect to ClickHouse + clickhouse::ClientOptions client_options; + client_options.SetHost(host_); + client_options.SetPort(port_); + client_options.SetUser(username_); + client_options.SetPassword(password_); + client_options.SetDefaultDatabase(database_); + + try { + clickhouse_client_ = std::make_unique(client_options); + // Test connection + clickhouse_client_->Execute("SELECT 1"); + } catch (const std::exception& e) { + return error::Internal("Failed to connect to ClickHouse at $0:$1 - $2", + host_, port_, e.what()); + } + + // Process each table + for (const auto& [table_name, rel] : resp.schema().relation_map()) { + // Apply table filter if specified + if (!table_filter_.empty() && table_name != table_filter_) { + continue; + } + + TableResult result; + result.table_name = table_name; + + // Check if table has a time_ column (required for partitioning) + bool has_time_column = false; + for (const auto& col : rel.columns()) { + if (col.column_name() == "time_" && + col.column_type() == types::DataType::TIME64NS) { + has_time_column = true; + break; + } + } + + if (!has_time_column) { + result.status = "skipped"; + result.message = "Table does not have a time_ TIME64NS column, skipping"; + results_.push_back(result); + continue; + } + + // Generate CREATE TABLE statement + std::string create_table_sql = GenerateCreateTableSQL(table_name, rel); + + // Execute the CREATE TABLE + try { + // Drop existing table if it exists + clickhouse_client_->Execute(absl::Substitute("DROP TABLE IF EXISTS $0", table_name)); + + // Create new table + clickhouse_client_->Execute(create_table_sql); + + result.status = "success"; + result.message = "Table created successfully"; + } catch 
(const std::exception& e) { + result.status = "error"; + result.message = absl::Substitute("Failed to create table: $0", e.what()); + } + + results_.push_back(result); + } + + return Status::OK(); + } + + bool NextRecord(FunctionContext*, RecordWriter* rw) { + if (idx_ >= static_cast(results_.size())) { + return false; + } + + const auto& result = results_[idx_]; + rw->Append(result.table_name); + rw->Append(result.status); + rw->Append(result.message); + + idx_++; + return idx_ < static_cast(results_.size()); + } + + private: + struct TableResult { + std::string table_name; + std::string status; + std::string message; + }; + + /** + * Generates a CREATE TABLE SQL statement for ClickHouse based on Pixie table schema. + * Follows the pattern from carnot_executable.cc: + * - Maps Pixie types to ClickHouse types + * - Adds hostname String column + * - Adds event_time DateTime64(3) column + * - Uses ENGINE = MergeTree() + * - Uses PARTITION BY toYYYYMM(event_time) + * - Uses ORDER BY (hostname, event_time) + */ + std::string GenerateCreateTableSQL(const std::string& table_name, + const px::table_store::schemapb::Relation& schema) { + std::vector column_defs; + + // Add columns from schema + for (const auto& col : schema.columns()) { + std::string column_name = col.column_name(); + std::string clickhouse_type = clickhouse_schema::PixieTypeToClickHouseType( + col.column_type(), column_name); + column_defs.push_back(absl::Substitute("$0 $1", column_name, clickhouse_type)); + } + + // Add hostname column + column_defs.push_back("hostname String"); + + // Add event_time column for partitioning (will be populated from time_ column) + column_defs.push_back("event_time DateTime64(3)"); + + // Build the CREATE TABLE statement + std::string columns_str = absl::StrJoin(column_defs, ",\n "); + + std::string create_sql = absl::Substitute(R"( + CREATE TABLE $0 ( + $1 + ) ENGINE = MergeTree() + PARTITION BY toYYYYMM(event_time) + ORDER BY (hostname, event_time) + )", table_name, 
columns_str); + + return create_sql; + } + + int idx_ = 0; + std::vector results_; + std::shared_ptr stub_; + std::function add_context_authentication_func_; + std::unique_ptr clickhouse_client_; + + // ClickHouse connection parameters + std::string host_; + int port_; + std::string username_; + std::string password_; + std::string database_; + std::string table_filter_; +}; + } // namespace md } // namespace funcs } // namespace vizier diff --git a/src/vizier/services/metadata/local/BUILD.bazel b/src/vizier/services/metadata/local/BUILD.bazel new file mode 100644 index 00000000000..1f2ae16792f --- /dev/null +++ b/src/vizier/services/metadata/local/BUILD.bazel @@ -0,0 +1,33 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("//bazel:pl_build_system.bzl", "pl_cc_library") + +package(default_visibility = [ + "//src/carnot:__subpackages__", + "//src/experimental:__subpackages__", + "//src/vizier:__subpackages__", +]) + +pl_cc_library( + name = "cc_library", + hdrs = ["local_metadata_service.h"], + deps = [ + "//src/table_store:cc_library", + "//src/vizier/services/metadata/metadatapb:service_pl_cc_proto", + "@com_github_grpc_grpc//:grpc++", + ], +) diff --git a/src/vizier/services/metadata/local/local_metadata_service.h b/src/vizier/services/metadata/local/local_metadata_service.h new file mode 100644 index 00000000000..bb3b8b4fc94 --- /dev/null +++ b/src/vizier/services/metadata/local/local_metadata_service.h @@ -0,0 +1,160 @@ +/* + * Copyright 2018- The Pixie Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include +#include + +#include "src/common/base/base.h" +#include "src/table_store/table_store.h" +#include "src/vizier/services/metadata/metadatapb/service.grpc.pb.h" +#include "src/vizier/services/metadata/metadatapb/service.pb.h" + +namespace px { +namespace vizier { +namespace services { +namespace metadata { + +/** + * LocalMetadataServiceImpl implements a local stub for the MetadataService. + * Only GetSchemas is implemented - it reads from the table store. + * All other methods return UNIMPLEMENTED status. 
+ * + * This is useful for testing and local execution environments where + * a full metadata service is not available. + */ +class LocalMetadataServiceImpl final : public MetadataService::Service { + public: + LocalMetadataServiceImpl() = delete; + explicit LocalMetadataServiceImpl(table_store::TableStore* table_store) + : table_store_(table_store) {} + + ::grpc::Status GetSchemas(::grpc::ServerContext*, const SchemaRequest*, + SchemaResponse* response) override { + LOG(INFO) << "GetSchemas called"; + + // Get all table IDs from the table store + auto table_ids = table_store_->GetTableIDs(); + + // Build the schema response + auto* schema = response->mutable_schema(); + + for (const auto& table_id : table_ids) { + // Get the table name + std::string table_name = table_store_->GetTableName(table_id); + if (table_name.empty()) { + LOG(WARNING) << "Failed to get table name for ID: " << table_id; + continue; + } + + // Get the table object + auto* table = table_store_->GetTable(table_id); + if (table == nullptr) { + LOG(WARNING) << "Failed to get table for ID: " << table_id; + continue; + } + + // Get the relation from the table + auto relation = table->GetRelation(); + + // Add to the relation map in the schema + // The map value is a Relation proto directly + auto& rel_proto = (*schema->mutable_relation_map())[table_name]; + + // Add columns to the relation + for (size_t i = 0; i < relation.NumColumns(); ++i) { + auto* col = rel_proto.add_columns(); + col->set_column_name(relation.GetColumnName(i)); + col->set_column_type(relation.GetColumnType(i)); + col->set_column_desc(""); // No description available from table store + col->set_pattern_type(types::PatternType::GENERAL); + } + + // Set table description (empty for now) + rel_proto.set_desc(""); + } + + return ::grpc::Status::OK; + } + + ::grpc::Status GetAgentUpdates(::grpc::ServerContext*, const AgentUpdatesRequest*, + ::grpc::ServerWriter*) override { + return ::grpc::Status(grpc::StatusCode::UNIMPLEMENTED, 
"GetAgentUpdates not implemented"); + } + + ::grpc::Status GetAgentInfo(::grpc::ServerContext*, const AgentInfoRequest*, + AgentInfoResponse*) override { + return ::grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "GetAgentInfo not implemented"); + } + + ::grpc::Status GetWithPrefixKey(::grpc::ServerContext*, const WithPrefixKeyRequest*, + WithPrefixKeyResponse*) override { + return ::grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "GetWithPrefixKey not implemented"); + } + + private: + table_store::TableStore* table_store_; +}; + +/** + * LocalMetadataGRPCServer wraps the LocalMetadataServiceImpl and provides a gRPC server. + * Uses in-process communication for efficiency. + */ +class LocalMetadataGRPCServer { + public: + LocalMetadataGRPCServer() = delete; + explicit LocalMetadataGRPCServer(table_store::TableStore* table_store) + : metadata_service_(std::make_unique(table_store)) { + grpc::ServerBuilder builder; + + // Use in-process communication + builder.RegisterService(metadata_service_.get()); + + grpc_server_ = builder.BuildAndStart(); + CHECK(grpc_server_ != nullptr); + + LOG(INFO) << "Starting Local Metadata service (in-process)"; + } + + void Stop() { + if (grpc_server_) { + grpc_server_->Shutdown(); + } + grpc_server_.reset(nullptr); + } + + ~LocalMetadataGRPCServer() { Stop(); } + + std::shared_ptr StubGenerator() const { + grpc::ChannelArguments args; + // NewStub returns unique_ptr, convert to shared_ptr + return std::shared_ptr( + MetadataService::NewStub(grpc_server_->InProcessChannel(args))); + } + + private: + std::unique_ptr grpc_server_; + std::unique_ptr metadata_service_; +}; + +} // namespace metadata +} // namespace services +} // namespace vizier +} // namespace px diff --git a/src/vizier/services/metadata/metadatapb/BUILD.bazel b/src/vizier/services/metadata/metadatapb/BUILD.bazel index 11b8b4962db..a5434b84468 100644 --- a/src/vizier/services/metadata/metadatapb/BUILD.bazel +++ b/src/vizier/services/metadata/metadatapb/BUILD.bazel @@ -19,7 
+19,11 @@ load("//bazel:proto_compile.bzl", "pl_cc_proto_library", "pl_go_proto_library", pl_proto_library( name = "service_pl_proto", srcs = ["service.proto"], - visibility = ["//src/vizier:__subpackages__"], + visibility = [ + "//src/carnot:__subpackages__", + "//src/experimental:__subpackages__", + "//src/vizier:__subpackages__", + ], deps = [ "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", @@ -37,7 +41,11 @@ pl_proto_library( pl_cc_proto_library( name = "service_pl_cc_proto", proto = ":service_pl_proto", - visibility = ["//src/vizier:__subpackages__"], + visibility = [ + "//src/carnot:__subpackages__", + "//src/experimental:__subpackages__", + "//src/vizier:__subpackages__", + ], deps = [ "//src/api/proto/uuidpb:uuid_pl_cc_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", From a5b02e3daaa72d62d2547ce395ebeed595e4ce9d Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Tue, 21 Oct 2025 14:21:56 +0000 Subject: [PATCH 67/86] First working version of px.CreateClickHouseSchemas UDTF Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 46 ++++--------- src/table_store/schema/row_batch.cc | 2 +- src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 24 +++---- .../metadata/local/local_metadata_service.h | 68 ++++++++++++++++++- 4 files changed, 87 insertions(+), 53 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 670e3e9b1bf..dc250612c2c 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -283,37 +283,8 @@ std::unique_ptr SetupClickHouseClient() { /** * Creates the http_events table in ClickHouse with proper schema and sample data. 
*/ -void CreateHttpEventsTable(clickhouse::Client* client) { +void PopulateHttpEventsTable(clickhouse::Client* client) { try { - client->Execute("DROP TABLE IF EXISTS http_events"); - - // Create table with http_events schema plus hostname and event_time - client->Execute(R"( - CREATE TABLE http_events ( - time_ DateTime64(9), - local_addr String, - local_port Int64, - remote_addr String, - remote_port Int64, - major_version Int64, - minor_version Int64, - content_type Int64, - req_headers String, - req_method String, - req_path String, - req_body String, - resp_headers String, - resp_status Int64, - resp_message String, - resp_body String, - resp_latency_ns Int64, - hostname String, - event_time DateTime64(3) - ) ENGINE = MergeTree() - PARTITION BY toYYYYMM(event_time) - ORDER BY (hostname, event_time) - )"); - // Insert sample data auto time_col = std::make_shared(9); auto local_addr_col = std::make_shared(); @@ -460,7 +431,6 @@ int main(int argc, char* argv[]) { // Setup ClickHouse client and create test table clickhouse_client = SetupClickHouseClient(); - CreateHttpEventsTable(clickhouse_client.get()); LOG(INFO) << "ClickHouse ready with http_events table"; } else { // Only load CSV if not using ClickHouse @@ -485,8 +455,6 @@ int main(int argc, char* argv[]) { ); auto func_registry = std::make_unique("default_registry"); - // Register both carnot and vizier functions - px::carnot::funcs::RegisterFuncsOrDie(func_registry.get()); px::vizier::funcs::RegisterFuncsOrDie(func_context, func_registry.get()); auto clients_config = @@ -534,7 +502,17 @@ int main(int argc, char* argv[]) { "resp_body", "resp_latency_ns", "hostname", "event_time"}; px::table_store::schema::Relation rel(types, names); auto http_events_table = px::table_store::Table::Create("http_events", rel); - table_store->AddTable("http_events", http_events_table); + // Need to provide a table_id for GetTableIDs() to work + uint64_t http_events_table_id = 1; + table_store->AddTable(http_events_table, 
"http_events", http_events_table_id); + + auto schema_query = "import px; px.display(px.CreateClickHouseSchemas())"; + auto schema_query_status = carnot->ExecuteQuery(schema_query, sole::uuid4(), px::CurrentTimeNS()); + if (!schema_query_status.ok()) { + LOG(FATAL) << absl::Substitute("Schema query failed to execute: $0", + schema_query_status.msg()); + } + PopulateHttpEventsTable(clickhouse_client.get()); } else if (table != nullptr) { // Add CSV table to table_store table_store->AddTable(table_name, table); diff --git a/src/table_store/schema/row_batch.cc b/src/table_store/schema/row_batch.cc index 4c48701cd5d..d8b01ec09db 100644 --- a/src/table_store/schema/row_batch.cc +++ b/src/table_store/schema/row_batch.cc @@ -38,7 +38,7 @@ std::shared_ptr RowBatch::ColumnAt(int64_t i) const { return colum Status RowBatch::AddColumn(const std::shared_ptr& col) { if (columns_.size() >= desc_.size()) { - return error::InvalidArgument("Schema only allows $0 columns", desc_.size()); + return error::InvalidArgument("Schema only allows $0 columns, got $1", desc_.size(), columns_.size()); } if (col->length() != num_rows_) { return error::InvalidArgument("Schema only allows $0 rows, got $1", num_rows_, col->length()); diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index 02a5ba60f94..3e85501d6f5 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -1138,25 +1138,22 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF("host", "ClickHouse server host", "localhost"), + UDTFArg::Make("host", "ClickHouse server host", "'localhost'"), UDTFArg::Make("port", "ClickHouse server port", 9000), - UDTFArg::Make("username", "ClickHouse username", "default"), - UDTFArg::Make("password", "ClickHouse password", ""), - UDTFArg::Make("database", "ClickHouse database", "default"), - UDTFArg::Make( - "table_filter", "Optional table name filter (empty for all tables)", "")); + 
UDTFArg::Make("username", "ClickHouse username", "'default'"), + UDTFArg::Make("password", "ClickHouse password", "'test_password'"), + UDTFArg::Make("database", "ClickHouse database", "'default'")); } Status Init(FunctionContext*, types::StringValue host, types::Int64Value port, types::StringValue username, types::StringValue password, - types::StringValue database, types::StringValue table_filter) { + types::StringValue database) { // Store ClickHouse connection parameters host_ = std::string(host); port_ = port.val; username_ = std::string(username); password_ = std::string(password); database_ = std::string(database); - table_filter_ = std::string(table_filter); // Fetch schemas from MDS px::vizier::services::metadata::SchemaRequest req; @@ -1187,13 +1184,7 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF #include #include #include @@ -48,7 +49,6 @@ class LocalMetadataServiceImpl final : public MetadataService::Service { ::grpc::Status GetSchemas(::grpc::ServerContext*, const SchemaRequest*, SchemaResponse* response) override { - LOG(INFO) << "GetSchemas called"; // Get all table IDs from the table store auto table_ids = table_store_->GetTableIDs(); @@ -100,8 +100,70 @@ class LocalMetadataServiceImpl final : public MetadataService::Service { } ::grpc::Status GetAgentInfo(::grpc::ServerContext*, const AgentInfoRequest*, - AgentInfoResponse*) override { - return ::grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "GetAgentInfo not implemented"); + AgentInfoResponse* response) override { + + // Create a single agent metadata entry for local testing + auto* agent_metadata = response->add_info(); + + // Set up Agent information + auto* agent = agent_metadata->mutable_agent(); + auto* agent_info = agent->mutable_info(); + + // Generate a fixed UUID for the agent (using a realistic looking UUID) + // UUID: 12345678-1234-1234-1234-123456789abc + auto* agent_id = agent_info->mutable_agent_id(); + agent_id->set_high_bits(0x1234567812341234); + 
agent_id->set_low_bits(0x1234123456789abc); + + // Set up host information + auto* host_info = agent_info->mutable_host_info(); + host_info->set_hostname("local-test-host"); + host_info->set_pod_name("local-pem-pod"); + host_info->set_host_ip("127.0.0.1"); + + // Set kernel version (example: 5.15.0) + auto* kernel = host_info->mutable_kernel(); + kernel->set_version(5); + kernel->set_major_rev(15); + kernel->set_minor_rev(0); + host_info->set_kernel_headers_installed(true); + + // Set agent capabilities and parameters + agent_info->set_ip_address("127.0.0.1"); + auto* capabilities = agent_info->mutable_capabilities(); + capabilities->set_collects_data(true); + + auto* parameters = agent_info->mutable_parameters(); + parameters->set_profiler_stack_trace_sample_period_ms(100); + + // Set agent timestamps and ASID + auto current_time_ns = std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + agent->set_create_time_ns(current_time_ns); + agent->set_last_heartbeat_ns(current_time_ns); + agent->set_asid(0); + + // Set up AgentStatus + auto* status = agent_metadata->mutable_status(); + status->set_ns_since_last_heartbeat(0); + status->set_state( + px::vizier::services::shared::agent::AgentState::AGENT_STATE_HEALTHY); + + // Set up CarnotInfo + auto* carnot_info = agent_metadata->mutable_carnot_info(); + carnot_info->set_query_broker_address("local-pem:50300"); + auto* carnot_agent_id = carnot_info->mutable_agent_id(); + carnot_agent_id->set_high_bits(0x1234567812341234); + carnot_agent_id->set_low_bits(0x1234123456789abc); + carnot_info->set_has_grpc_server(true); + carnot_info->set_grpc_address("local-pem:50300"); + carnot_info->set_has_data_store(true); + carnot_info->set_processes_data(true); + carnot_info->set_accepts_remote_sources(false); + carnot_info->set_asid(0); + + return ::grpc::Status::OK; } ::grpc::Status GetWithPrefixKey(::grpc::ServerContext*, const WithPrefixKeyRequest*, From 
14023b27b72e38eb8416cc318987a1dc02fcf5c8 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Tue, 21 Oct 2025 15:49:09 +0000 Subject: [PATCH 68/86] Add more loggig to row_batch Signed-off-by: Dom Del Nano --- src/table_store/schema/row_batch.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/table_store/schema/row_batch.cc b/src/table_store/schema/row_batch.cc index d8b01ec09db..411c479b3cc 100644 --- a/src/table_store/schema/row_batch.cc +++ b/src/table_store/schema/row_batch.cc @@ -23,6 +23,7 @@ #include #include +#include #include "src/common/base/base.h" #include "src/shared/types/arrow_adapter.h" #include "src/shared/types/type_utils.h" @@ -43,8 +44,18 @@ Status RowBatch::AddColumn(const std::shared_ptr& col) { if (col->length() != num_rows_) { return error::InvalidArgument("Schema only allows $0 rows, got $1", num_rows_, col->length()); } - if (col->type_id() != types::ToArrowType(desc_.type(columns_.size()))) { - return error::InvalidArgument("Column[$0] was given incorrect type", columns_.size()); + auto expected_arrow_type = types::ToArrowType(desc_.type(columns_.size())); + if (col->type_id() != expected_arrow_type) { + auto pixie_type = desc_.type(columns_.size()); + return error::InvalidArgument( + "Column[$0] has incorrect Arrow type. 
" + "Got Arrow type_id=$1 (type=$2), expected Arrow type_id=$3 for Pixie DataType::$4 (enum value $5)", + columns_.size(), + static_cast(col->type_id()), + col->type()->ToString(), + static_cast(expected_arrow_type), + magic_enum::enum_name(pixie_type), + static_cast(pixie_type)); } columns_.emplace_back(col); From 831fe3f67d6dbce820d7f117e27e8fbee3f994ac Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Tue, 21 Oct 2025 15:50:31 +0000 Subject: [PATCH 69/86] Get carnot_executable working with unmodified stirling http_events table Signed-off-by: Dom Del Nano --- src/carnot/BUILD.bazel | 1 + src/carnot/carnot_executable.cc | 150 +++++++++--------- src/carnot/exec/clickhouse_source_node.cc | 7 +- src/carnot/planner/ir/clickhouse_source_ir.cc | 37 ++++- src/carnot/planner/objects/dataframe.cc | 21 ++- .../socket_tracer/BUILD.bazel | 5 +- src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 8 + 7 files changed, 153 insertions(+), 76 deletions(-) diff --git a/src/carnot/BUILD.bazel b/src/carnot/BUILD.bazel index 2f58957d210..30132ab5f14 100644 --- a/src/carnot/BUILD.bazel +++ b/src/carnot/BUILD.bazel @@ -111,6 +111,7 @@ pl_cc_binary( "//src/common/testing/test_utils:cc_library", "//src/shared/version:cc_library", "//src/shared/version:version_linkstamp", + "//src/stirling/source_connectors/socket_tracer:cc_library", "//src/vizier/funcs:cc_library", "//src/vizier/funcs/context:cc_library", "//src/vizier/services/metadata/local:cc_library", diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index dc250612c2c..f3612c39e94 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -41,6 +41,7 @@ #include "src/vizier/funcs/context/vizier_context.h" #include "src/vizier/funcs/funcs.h" #include "src/vizier/services/metadata/local/local_metadata_service.h" +#include "src/stirling/source_connectors/socket_tracer/http_table.h" // Example clickhouse test usage: // The records inserted into clickhouse exist between -10m and -5m @@ 
-285,12 +286,19 @@ std::unique_ptr SetupClickHouseClient() { */ void PopulateHttpEventsTable(clickhouse::Client* client) { try { - // Insert sample data + // Get current hostname for the data + char current_hostname[256]; + gethostname(current_hostname, sizeof(current_hostname)); + std::string hostname_str(current_hostname); + + // Insert sample data matching the stirling HTTP table schema (minus upid) auto time_col = std::make_shared(9); - auto local_addr_col = std::make_shared(); - auto local_port_col = std::make_shared(); auto remote_addr_col = std::make_shared(); auto remote_port_col = std::make_shared(); + auto local_addr_col = std::make_shared(); + auto local_port_col = std::make_shared(); + auto trace_role_col = std::make_shared(); + auto encrypted_col = std::make_shared(); // Boolean auto major_version_col = std::make_shared(); auto minor_version_col = std::make_shared(); auto content_type_col = std::make_shared(); @@ -298,11 +306,13 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { auto req_method_col = std::make_shared(); auto req_path_col = std::make_shared(); auto req_body_col = std::make_shared(); + auto req_body_size_col = std::make_shared(); auto resp_headers_col = std::make_shared(); auto resp_status_col = std::make_shared(); auto resp_message_col = std::make_shared(); auto resp_body_col = std::make_shared(); - auto resp_latency_ns_col = std::make_shared(); + auto resp_body_size_col = std::make_shared(); + auto latency_col = std::make_shared(); auto hostname_col = std::make_shared(); auto event_time_col = std::make_shared(3); @@ -310,63 +320,62 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { std::time_t now = std::time(nullptr); LOG(INFO) << "Current time: " << now; - // Get current hostname - char current_hostname[256]; - gethostname(current_hostname, sizeof(current_hostname)); - std::string hostname_str(current_hostname); - - // Add 5 records with the current hostname - for (int i = 0; i < 5; ++i) { + // Add 10 records (5 
with current hostname, 5 with different hostnames) + for (int i = 0; i < 10; ++i) { time_col->Append((now - 600 + i * 60) * 1000000000LL); // Convert to nanoseconds - local_addr_col->Append("127.0.0.1"); - local_port_col->Append(8080); + remote_addr_col->Append(absl::StrFormat("192.168.1.%d", 100 + i)); remote_port_col->Append(50000 + i); - major_version_col->Append(1); - minor_version_col->Append(1); - content_type_col->Append(0); - req_headers_col->Append("Content-Type: application/json"); - req_method_col->Append(i % 2 == 0 ? "GET" : "POST"); - req_path_col->Append(absl::StrFormat("/api/v1/resource/%d", i)); - req_body_col->Append(i % 2 == 0 ? "" : "{\"data\": \"test\"}"); - resp_headers_col->Append("Content-Type: application/json"); - resp_status_col->Append(200); - resp_message_col->Append("OK"); - resp_body_col->Append("{\"result\": \"success\"}"); - resp_latency_ns_col->Append(1000000 + i * 100000); - hostname_col->Append(hostname_str); - event_time_col->Append((now - 600 + i * 60) * 1000LL); // Convert to milliseconds - } - - // Add 5 more records with different hostnames for testing - for (int i = 5; i < 10; ++i) { - time_col->Append((now - 600 + i * 60) * 1000000000LL); // Convert to nanoseconds local_addr_col->Append("127.0.0.1"); local_port_col->Append(8080); - remote_addr_col->Append(absl::StrFormat("192.168.1.%d", 100 + i)); - remote_port_col->Append(50000 + i); + + // trace_role: 1 = server, 2 = client (alternate) + trace_role_col->Append(i % 2 == 0 ? 1 : 2); + + // encrypted: false for most, true for some + encrypted_col->Append(i % 3 == 0 ? 1 : 0); + major_version_col->Append(1); minor_version_col->Append(1); - content_type_col->Append(0); + content_type_col->Append(i % 2 == 0 ? 1 : 0); // 1 = JSON, 0 = unknown + req_headers_col->Append("Content-Type: application/json"); req_method_col->Append(i % 2 == 0 ? "GET" : "POST"); req_path_col->Append(absl::StrFormat("/api/v1/resource/%d", i)); - req_body_col->Append(i % 2 == 0 ? 
"" : "{\"data\": \"test\"}"); + + std::string req_body = i % 2 == 0 ? "" : "{\"data\": \"test\"}"; + req_body_col->Append(req_body); + req_body_size_col->Append(req_body.size()); + resp_headers_col->Append("Content-Type: application/json"); resp_status_col->Append(200); resp_message_col->Append("OK"); - resp_body_col->Append("{\"result\": \"success\"}"); - resp_latency_ns_col->Append(1000000 + i * 100000); - hostname_col->Append(absl::StrFormat("other-host-%d", i % 3)); + + std::string resp_body = "{\"result\": \"success\"}"; + resp_body_col->Append(resp_body); + resp_body_size_col->Append(resp_body.size()); + + latency_col->Append(1000000 + i * 100000); + + // First 5 use current hostname, next 5 use different hostnames + if (i < 5) { + hostname_col->Append(hostname_str); + } else { + hostname_col->Append(absl::StrFormat("other-host-%d", i % 3)); + } + event_time_col->Append((now - 600 + i * 60) * 1000LL); // Convert to milliseconds } clickhouse::Block block; block.AppendColumn("time_", time_col); - block.AppendColumn("local_addr", local_addr_col); - block.AppendColumn("local_port", local_port_col); + // Skip upid column (UINT128 not supported in ClickHouse client) block.AppendColumn("remote_addr", remote_addr_col); block.AppendColumn("remote_port", remote_port_col); + block.AppendColumn("local_addr", local_addr_col); + block.AppendColumn("local_port", local_port_col); + block.AppendColumn("trace_role", trace_role_col); + block.AppendColumn("encrypted", encrypted_col); block.AppendColumn("major_version", major_version_col); block.AppendColumn("minor_version", minor_version_col); block.AppendColumn("content_type", content_type_col); @@ -374,18 +383,20 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { block.AppendColumn("req_method", req_method_col); block.AppendColumn("req_path", req_path_col); block.AppendColumn("req_body", req_body_col); + block.AppendColumn("req_body_size", req_body_size_col); block.AppendColumn("resp_headers", resp_headers_col); 
block.AppendColumn("resp_status", resp_status_col); block.AppendColumn("resp_message", resp_message_col); block.AppendColumn("resp_body", resp_body_col); - block.AppendColumn("resp_latency_ns", resp_latency_ns_col); + block.AppendColumn("resp_body_size", resp_body_size_col); + block.AppendColumn("latency", latency_col); block.AppendColumn("hostname", hostname_col); block.AppendColumn("event_time", event_time_col); client->Insert("http_events", block); - LOG(INFO) << "http_events table created and populated successfully"; + LOG(INFO) << "http_events table populated successfully with 10 records"; } catch (const std::exception& e) { - LOG(FATAL) << "Failed to create http_events table: " << e.what(); + LOG(FATAL) << "Failed to populate http_events table: " << e.what(); } } @@ -473,39 +484,36 @@ int main(int argc, char* argv[]) { .ConsumeValueOrDie(); if (use_clickhouse) { - // Create http_events table schema in table_store - std::vector types = { - px::types::DataType::TIME64NS, // time_ - px::types::DataType::STRING, // local_addr - px::types::DataType::INT64, // local_port - px::types::DataType::STRING, // remote_addr - px::types::DataType::INT64, // remote_port - px::types::DataType::INT64, // major_version - px::types::DataType::INT64, // minor_version - px::types::DataType::INT64, // content_type - px::types::DataType::STRING, // req_headers - px::types::DataType::STRING, // req_method - px::types::DataType::STRING, // req_path - px::types::DataType::STRING, // req_body - px::types::DataType::STRING, // resp_headers - px::types::DataType::INT64, // resp_status - px::types::DataType::STRING, // resp_message - px::types::DataType::STRING, // resp_body - px::types::DataType::INT64, // resp_latency_ns - px::types::DataType::STRING, // hostname - px::types::DataType::TIME64NS, // event_time - }; - std::vector names = { - "time_", "local_addr", "local_port", "remote_addr", "remote_port", - "major_version", "minor_version", "content_type", "req_headers", "req_method", - 
"req_path", "req_body", "resp_headers", "resp_status", "resp_message", - "resp_body", "resp_latency_ns", "hostname", "event_time"}; + // Create http_events table schema in table_store using the actual stirling HTTP table definition + // Skip upid column since UINT128 is not supported by ClickHouse client library + std::vector types; + std::vector names; + + // Convert stirling DataTableSchema to table_store Relation + for (const auto& element : px::stirling::kHTTPTable.elements()) { + std::string col_name(element.name()); + if (col_name == "upid") { + continue; // Skip upid (UINT128 not supported in ClickHouse client) + } + if (col_name == "px_info_") { + continue; // Skip px_info_ (debug-only column) + } + types.push_back(element.type()); + names.push_back(col_name); + } + px::table_store::schema::Relation rel(types, names); auto http_events_table = px::table_store::Table::Create("http_events", rel); // Need to provide a table_id for GetTableIDs() to work uint64_t http_events_table_id = 1; table_store->AddTable(http_events_table, "http_events", http_events_table_id); + // Log the schema for debugging + LOG(INFO) << "http_events table schema has " << names.size() << " columns:"; + for (size_t i = 0; i < names.size(); ++i) { + LOG(INFO) << " Column[" << i << "]: " << names[i] << " (type=" << static_cast(types[i]) << ")"; + } + auto schema_query = "import px; px.display(px.CreateClickHouseSchemas())"; auto schema_query_status = carnot->ExecuteQuery(schema_query, sole::uuid4(), px::CurrentTimeNS()); if (!schema_query_status.ok()) { diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index 885dc1eebfd..4ab658d1427 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -175,12 +175,15 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock // This is where column type inference happens // Integer types - all map to INT64 in Pixie + + // TODO(ddelnano): UInt8 is a special 
case since it can map to Pixie's boolean type. + // Figure out how to handle that properly if (type_name == "UInt8") { auto typed_col = ch_column->As(); - arrow::Int64Builder builder; + arrow::BooleanBuilder builder; PX_RETURN_IF_ERROR(builder.Reserve(num_rows)); for (size_t i = 0; i < num_rows; ++i) { - builder.UnsafeAppend(static_cast(typed_col->At(i))); + builder.UnsafeAppend(typed_col->At(i) != 0); } std::shared_ptr array; PX_RETURN_IF_ERROR(builder.Finish(&array)); diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc index 10bcfb1ef26..e1acfc00b3e 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.cc +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -48,6 +48,10 @@ Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { DCHECK(is_type_resolved()); DCHECK_EQ(column_index_map_.size(), resolved_table_type()->ColumnNames().size()); for (const auto& [idx, col_name] : Enumerate(resolved_table_type()->ColumnNames())) { + if (col_name == "upid") { + LOG(INFO) << "Skipping upid column in ClickHouse source proto."; + continue; + } pb->add_column_names(col_name); auto val_type = std::static_pointer_cast( resolved_table_type()->GetColumnType(col_name).ConsumeValueOrDie()); @@ -122,17 +126,48 @@ Status ClickHouseSourceIR::ResolveType(CompilerState* compiler_state) { auto table_relation = relation_it->second; auto full_table_type = TableType::Create(table_relation); if (select_all()) { + // For select_all, add all table columns plus ClickHouse-added columns (hostname, event_time) std::vector column_indices; - for (int64_t i = 0; i < static_cast(table_relation.NumColumns()); ++i) { + int64_t table_column_count = static_cast(table_relation.NumColumns()); + + // Add all table columns + for (int64_t i = 0; i < table_column_count; ++i) { column_indices.push_back(i); } + + // Add ClickHouse-added columns + full_table_type->AddColumn("hostname", ValueType::Create(types::DataType::STRING, 
types::SemanticType::ST_NONE)); + column_indices.push_back(table_column_count); // hostname is after all table columns + + full_table_type->AddColumn("event_time", ValueType::Create(types::DataType::TIME64NS, types::SemanticType::ST_TIME_NS)); + column_indices.push_back(table_column_count + 1); // event_time is after hostname + SetColumnIndexMap(column_indices); return SetResolvedType(full_table_type); } std::vector column_indices; auto new_table = TableType::Create(); + + // Calculate the index offset for ClickHouse-added columns (after all table columns) + int64_t table_column_count = static_cast(table_relation.NumColumns()); + auto next_count = 0; + for (const auto& col_name : column_names_) { + // Handle special ClickHouse-added columns that don't exist in the source table + if (col_name == "hostname") { + new_table->AddColumn(col_name, ValueType::Create(types::DataType::STRING, types::SemanticType::ST_NONE)); + // hostname is added by ClickHouse after all table columns + column_indices.push_back(table_column_count + (next_count++)); + continue; + } + if (col_name == "event_time") { + new_table->AddColumn(col_name, ValueType::Create(types::DataType::TIME64NS, types::SemanticType::ST_TIME_NS)); + // event_time is added by ClickHouse after hostname + column_indices.push_back(table_column_count + (next_count++)); + continue; + } + PX_ASSIGN_OR_RETURN(auto col_type, full_table_type->GetColumnType(col_name)); new_table->AddColumn(col_name, col_type); column_indices.push_back(table_relation.GetColumnIndex(col_name)); diff --git a/src/carnot/planner/objects/dataframe.cc b/src/carnot/planner/objects/dataframe.cc index fbaabda1844..2e68974c894 100644 --- a/src/carnot/planner/objects/dataframe.cc +++ b/src/carnot/planner/objects/dataframe.cc @@ -17,6 +17,9 @@ */ #include "src/carnot/planner/objects/dataframe.h" + +#include + #include "src/carnot/planner/ast/ast_visitor.h" #include "src/carnot/planner/ir/ast_utils.h" #include "src/carnot/planner/ir/clickhouse_source_ir.h" 
@@ -117,8 +120,24 @@ StatusOr DataFrameConstructor(CompilerState* compiler_state, IR* gr if (is_clickhouse) { // Create ClickHouseSourceIR + // Note: hostname and event_time columns are handled in ClickHouseSourceIR::ResolveType + // Only add them if the user explicitly selected some columns + std::vector clickhouse_columns = columns; + + if (!columns.empty()) { + // User selected specific columns - add hostname and event_time if not already present + if (std::find(clickhouse_columns.begin(), clickhouse_columns.end(), "hostname") == clickhouse_columns.end()) { + clickhouse_columns.push_back("hostname"); + } + + if (std::find(clickhouse_columns.begin(), clickhouse_columns.end(), "event_time") == clickhouse_columns.end()) { + clickhouse_columns.push_back("event_time"); + } + } + // If columns is empty, select_all() will be true and ResolveType will handle adding all columns + PX_ASSIGN_OR_RETURN(ClickHouseSourceIR * clickhouse_source_op, - graph->CreateNode(ast, table_name, columns)); + graph->CreateNode(ast, table_name, clickhouse_columns)); if (!NoneObject::IsNoneObject(args.GetArg("start_time"))) { PX_ASSIGN_OR_RETURN(ExpressionIR * start_time, diff --git a/src/stirling/source_connectors/socket_tracer/BUILD.bazel b/src/stirling/source_connectors/socket_tracer/BUILD.bazel index 47301fffdb5..893de0485a5 100644 --- a/src/stirling/source_connectors/socket_tracer/BUILD.bazel +++ b/src/stirling/source_connectors/socket_tracer/BUILD.bazel @@ -16,7 +16,10 @@ load("//bazel:pl_build_system.bzl", "pl_cc_binary", "pl_cc_bpf_test", "pl_cc_library", "pl_cc_test") -package(default_visibility = ["//src/stirling:__subpackages__"]) +package(default_visibility = [ + "//src/stirling:__subpackages__", + "//src/carnot:__subpackages__", +]) pl_cc_library( name = "cc_library", diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index 3e85501d6f5..88133cdef2a 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ 
b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -1271,6 +1271,14 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF Date: Wed, 22 Oct 2025 03:20:54 +0000 Subject: [PATCH 70/86] Remove upid (UINT128) and px_info column omission Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 23 +++++---- src/carnot/exec/clickhouse_source_node.cc | 47 +++++++++++++++++++ src/carnot/planner/ir/clickhouse_source_ir.cc | 4 -- src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 10 +--- 4 files changed, 62 insertions(+), 22 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index f3612c39e94..7b05dc07131 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -291,8 +291,9 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { gethostname(current_hostname, sizeof(current_hostname)); std::string hostname_str(current_hostname); - // Insert sample data matching the stirling HTTP table schema (minus upid) + // Insert sample data matching the stirling HTTP table schema (upid as String with high:low format) auto time_col = std::make_shared(9); + auto upid_col = std::make_shared(); auto remote_addr_col = std::make_shared(); auto remote_port_col = std::make_shared(); auto local_addr_col = std::make_shared(); @@ -313,6 +314,9 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { auto resp_body_col = std::make_shared(); auto resp_body_size_col = std::make_shared(); auto latency_col = std::make_shared(); +#ifndef NDEBUG + auto px_info_col = std::make_shared(); +#endif auto hostname_col = std::make_shared(); auto event_time_col = std::make_shared(3); @@ -324,6 +328,11 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { for (int i = 0; i < 10; ++i) { time_col->Append((now - 600 + i * 60) * 1000000000LL); // Convert to nanoseconds + // Generate upid as UINT128 in high:low string format + uint64_t upid_high = 1000 + i; + uint64_t upid_low = 2000 + i; + 
upid_col->Append(absl::StrFormat("%d:%d", upid_high, upid_low)); + remote_addr_col->Append(absl::StrFormat("192.168.1.%d", 100 + i)); remote_port_col->Append(50000 + i); local_addr_col->Append("127.0.0.1"); @@ -356,6 +365,9 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { resp_body_size_col->Append(resp_body.size()); latency_col->Append(1000000 + i * 100000); +#ifndef NDEBUG + px_info_col->Append(""); +#endif // First 5 use current hostname, next 5 use different hostnames if (i < 5) { @@ -369,7 +381,7 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { clickhouse::Block block; block.AppendColumn("time_", time_col); - // Skip upid column (UINT128 not supported in ClickHouse client) + block.AppendColumn("upid", upid_col); block.AppendColumn("remote_addr", remote_addr_col); block.AppendColumn("remote_port", remote_port_col); block.AppendColumn("local_addr", local_addr_col); @@ -485,19 +497,12 @@ int main(int argc, char* argv[]) { if (use_clickhouse) { // Create http_events table schema in table_store using the actual stirling HTTP table definition - // Skip upid column since UINT128 is not supported by ClickHouse client library std::vector types; std::vector names; // Convert stirling DataTableSchema to table_store Relation for (const auto& element : px::stirling::kHTTPTable.elements()) { std::string col_name(element.name()); - if (col_name == "upid") { - continue; // Skip upid (UINT128 not supported in ClickHouse client) - } - if (col_name == "px_info_") { - continue; // Skip px_info_ (debug-only column) - } types.push_back(element.type()); names.push_back(col_name); } diff --git a/src/carnot/exec/clickhouse_source_node.cc b/src/carnot/exec/clickhouse_source_node.cc index 4ab658d1427..a27e4363a12 100644 --- a/src/carnot/exec/clickhouse_source_node.cc +++ b/src/carnot/exec/clickhouse_source_node.cc @@ -171,9 +171,41 @@ StatusOr> ClickHouseSourceNode::ConvertClickHouseBlock const auto& ch_column = block[col_idx]; const auto& type_name = 
ch_column->Type()->GetName(); + // Check what the expected output type is for this column + auto expected_type = output_descriptor_->type(col_idx); + // For now, implement conversion for common types // This is where column type inference happens + // Special case: String in ClickHouse that should be UINT128 in Pixie + if (type_name == "String" && expected_type == types::DataType::UINT128) { + auto typed_col = ch_column->As(); + auto builder = types::MakeArrowBuilder(types::DataType::UINT128, arrow::default_memory_pool()); + PX_RETURN_IF_ERROR(builder->Reserve(num_rows)); + + for (size_t i = 0; i < num_rows; ++i) { + std::string value(typed_col->At(i)); + + // Parse "high:low" format + size_t colon_pos = value.find(':'); + if (colon_pos == std::string::npos) { + return error::InvalidArgument("Invalid UINT128 string format: $0 (expected high:low)", value); + } + + uint64_t high = std::stoull(value.substr(0, colon_pos)); + uint64_t low = std::stoull(value.substr(colon_pos + 1)); + absl::uint128 uint128_val = absl::MakeUint128(high, low); + + PX_RETURN_IF_ERROR(table_store::schema::CopyValue(builder.get(), uint128_val)); + } + + std::shared_ptr array; + PX_RETURN_IF_ERROR(builder->Finish(&array)); + PX_RETURN_IF_ERROR(row_batch->AddColumn(array)); + + continue; + } + // Integer types - all map to INT64 in Pixie // TODO(ddelnano): UInt8 is a special case since it can map to Pixie's boolean type. 
@@ -547,6 +579,9 @@ Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { case types::DataType::INT64: builder = std::make_shared(); break; + case types::DataType::UINT128: + builder = types::MakeArrowBuilder(types::DataType::UINT128, arrow::default_memory_pool()); + break; case types::DataType::FLOAT64: builder = std::make_shared(); break; @@ -586,6 +621,18 @@ Status ClickHouseSourceNode::GenerateNextImpl(ExecState* exec_state) { } break; } + case types::DataType::UINT128: { + auto typed_array = std::static_pointer_cast(array); + for (int i = 0; i < typed_array->length(); i++) { + if (typed_array->IsNull(i)) { + PX_RETURN_IF_ERROR(builder->AppendNull()); + } else { + auto val = types::GetValueFromArrowArray(array.get(), i); + PX_RETURN_IF_ERROR(table_store::schema::CopyValue(builder.get(), val)); + } + } + break; + } case types::DataType::TIME64NS: { auto typed_array = std::static_pointer_cast(array); auto typed_builder = std::static_pointer_cast(builder); diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc index e1acfc00b3e..3467285d246 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.cc +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -48,10 +48,6 @@ Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { DCHECK(is_type_resolved()); DCHECK_EQ(column_index_map_.size(), resolved_table_type()->ColumnNames().size()); for (const auto& [idx, col_name] : Enumerate(resolved_table_type()->ColumnNames())) { - if (col_name == "upid") { - LOG(INFO) << "Skipping upid column in ClickHouse source proto."; - continue; - } pb->add_column_names(col_name); auto val_type = std::static_pointer_cast( resolved_table_type()->GetColumnType(col_name).ConsumeValueOrDie()); diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index 88133cdef2a..9aba73495f8 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ 
b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -1102,7 +1102,7 @@ inline std::string PixieTypeToClickHouseType(types::DataType pixie_type, // Default to DateTime64(9) for other time columns return "DateTime64(9)"; case types::DataType::UINT128: - // ClickHouse doesn't have native UINT128, use String representation + // ClickHouse doesn't have native UINT128, use String representation (high:low format) return "String"; default: return "String"; // Fallback to String for unsupported types @@ -1271,14 +1271,6 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF Date: Wed, 22 Oct 2025 03:40:33 +0000 Subject: [PATCH 71/86] Get all clickhouse tests passing and carnot_executable adhoc test Signed-off-by: Dom Del Nano --- .../exec/clickhouse_source_node_test.cc | 2 -- .../distributedpb/distributed_plan.proto | 19 +++++++++++++++++++ src/carnot/planpb/test_proto.h | 1 - 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/carnot/exec/clickhouse_source_node_test.cc b/src/carnot/exec/clickhouse_source_node_test.cc index 1712de15a8f..ce0f6e7757a 100644 --- a/src/carnot/exec/clickhouse_source_node_test.cc +++ b/src/carnot/exec/clickhouse_source_node_test.cc @@ -262,7 +262,6 @@ TEST_F(ClickHouseSourceNodeTest, EmptyResultSet) { ch_op->add_column_types(types::DataType::STRING); ch_op->add_column_types(types::DataType::FLOAT64); ch_op->set_timestamp_column("timestamp"); - ch_op->set_partition_column("partition_key"); ch_op->set_start_time(1000000000000000000LL); // Year 2001 in nanoseconds ch_op->set_end_time(9223372036854775807LL); // Max int64 @@ -310,7 +309,6 @@ TEST_F(ClickHouseSourceNodeTest, FilteredQuery) { ch_op->add_column_types(types::DataType::STRING); ch_op->add_column_types(types::DataType::FLOAT64); ch_op->set_timestamp_column("timestamp"); - ch_op->set_partition_column("partition_key"); ch_op->set_start_time(1000000000000000000LL); // Year 2001 in nanoseconds ch_op->set_end_time(9223372036854775807LL); // Max int64 diff --git 
a/src/carnot/planner/distributedpb/distributed_plan.proto b/src/carnot/planner/distributedpb/distributed_plan.proto index b5a4e8d08a1..581b8748d37 100644 --- a/src/carnot/planner/distributedpb/distributed_plan.proto +++ b/src/carnot/planner/distributedpb/distributed_plan.proto @@ -142,6 +142,23 @@ message OTelEndpointConfig { int64 timeout = 4; } +// ClickHouseConfig contains the connection parameters for ClickHouse. +message ClickHouseConfig { + // The hostname of the node executing the query. + string hostname = 1; + // The ClickHouse server host. + string host = 2; + // The ClickHouse server port. + int32 port = 3; + // The ClickHouse username. + string username = 4; + // The ClickHouse password. + string password = 5; + // The ClickHouse database name. + string database = 6; +} + + message PluginConfig { // The start_time of the script in nanoseconds. int64 start_time_ns = 1; @@ -183,6 +200,8 @@ message LogicalPlannerState { // PluginConfig contains plugin related configuration. PluginConfig plugin_config = 9; + ClickHouseConfig clickhouse_config = 11; + // Debug options for the compiler. 
DebugInfo debug_info = 10; } diff --git a/src/carnot/planpb/test_proto.h b/src/carnot/planpb/test_proto.h index 474cfde2ad4..53487da1364 100644 --- a/src/carnot/planpb/test_proto.h +++ b/src/carnot/planpb/test_proto.h @@ -211,7 +211,6 @@ column_types: INT64 column_types: STRING column_types: FLOAT64 timestamp_column: "timestamp" -partition_column: "partition_key" start_time: 1000000000000000000 end_time: 9223372036854775807 )"; From d4fa954592cb0519a1e12a58c096ced49eeb4cf9 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Thu, 30 Oct 2025 00:07:56 +0000 Subject: [PATCH 72/86] Add use_if_not_exists for px.CreateClickHouseSchemas and test ClickHouseSourceNode against non Pixie clickhouse data (vector populated) Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 130 +++++++++++++-- src/carnot/planner/ir/BUILD.bazel | 1 + src/carnot/planner/ir/clickhouse_source_ir.cc | 156 +++++++++++++++++- src/carnot/planner/ir/clickhouse_source_ir.h | 8 + src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 25 ++- 5 files changed, 289 insertions(+), 31 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 7b05dc07131..f8ffb4def19 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -46,6 +46,10 @@ // Example clickhouse test usage: // The records inserted into clickhouse exist between -10m and -5m // bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --query="import px;df = px.DataFrame('http_events', clickhouse=True, start_time='-10m', end_time='-9m'); px.display(df)" --output_file=$(pwd)/output.csv +// +// Testing existing ClickHouse table (kubescape_stix) table population and query: +// bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --start_clickhouse=false --query="import px;df = px.DataFrame('kubescape_stix', clickhouse=True, start_time='-10m'); px.display(df)" 
--output_file=$(pwd)/output.csv + DEFINE_string(input_file, gflags::StringFromEnv("INPUT_FILE", ""), "The csv containing data to run the query on."); @@ -62,6 +66,9 @@ DEFINE_int64(rowbatch_size, gflags::Int64FromEnv("ROWBATCH_SIZE", 100), "The size of the rowbatches."); DEFINE_bool(use_clickhouse, gflags::BoolFromEnv("USE_CLICKHOUSE", false), + "Whether to populate a ClickHouse database."); + +DEFINE_bool(start_clickhouse, gflags::BoolFromEnv("START_CLICKHOUSE", true), "Whether to start a ClickHouse container with test data."); using px::types::DataType; @@ -412,6 +419,95 @@ void PopulateHttpEventsTable(clickhouse::Client* client) { } } +/** + * Checks if a table exists in ClickHouse. + */ +bool TableExists(clickhouse::Client* client, const std::string& table_name) { + try { + std::string query = absl::Substitute("EXISTS TABLE $0", table_name); + bool exists = false; + client->Select(query, [&exists](const clickhouse::Block& block) { + if (block.GetRowCount() > 0) { + auto result_col = block[0]->As(); + exists = result_col->At(0) == 1; + } + }); + return exists; + } catch (const std::exception& e) { + LOG(WARNING) << "Failed to check if table " << table_name << " exists: " << e.what(); + return false; + } +} + +/** + * Populates the kubescape_stix table with sample STIX data if it exists. 
+ */ +void PopulateKubescapeStixTable(clickhouse::Client* client) { + try { + // Check if table exists + if (!TableExists(client, "kubescape_stix")) { + LOG(INFO) << "kubescape_stix table does not exist, skipping population"; + return; + } + + LOG(INFO) << "Populating kubescape_stix table with sample data..."; + + // Get current hostname + char current_hostname[256]; + gethostname(current_hostname, sizeof(current_hostname)); + std::string hostname_str(current_hostname); + + // Create columns for the kubescape_stix table + auto timestamp_col = std::make_shared(); + auto pod_name_col = std::make_shared(); + auto namespace_col = std::make_shared(); + auto data_col = std::make_shared(); + auto hostname_col = std::make_shared(); + auto event_time_col = std::make_shared(3); + + // Add sample STIX data + std::time_t now = std::time(nullptr); + + // Add 5 sample records with different pods and namespaces + std::vector pod_names = {"web-pod-1", "api-pod-2", "db-pod-3", "cache-pod-4", "worker-pod-5"}; + std::vector namespaces = {"production", "staging", "development", "production", "staging"}; + + for (int i = 0; i < 5; ++i) { + // Timestamp as ISO 8601 string + std::time_t record_time = now - (300 - i * 60); // 5 minutes ago to 1 minute ago + char time_buf[30]; + std::strftime(time_buf, sizeof(time_buf), "%Y-%m-%dT%H:%M:%SZ", std::gmtime(&record_time)); + timestamp_col->Append(std::string(time_buf)); + + pod_name_col->Append(pod_names[i]); + namespace_col->Append(namespaces[i]); + + // Add unique STIX data for each record + std::string stix_data = absl::Substitute( + R"({"type":"bundle","id":"bundle--$0","objects":[{"type":"vulnerability","id":"vuln--$0","severity":"$1"}]})", + i, (i % 3 == 0 ? 
"high" : "medium")); + data_col->Append(stix_data); + + hostname_col->Append(hostname_str); + event_time_col->Append(record_time * 1000LL); // Convert to milliseconds + } + + // Create block and insert + clickhouse::Block block; + block.AppendColumn("timestamp", timestamp_col); + block.AppendColumn("pod_name", pod_name_col); + block.AppendColumn("namespace", namespace_col); + block.AppendColumn("data", data_col); + block.AppendColumn("hostname", hostname_col); + block.AppendColumn("event_time", event_time_col); + + client->Insert("kubescape_stix", block); + LOG(INFO) << "kubescape_stix table populated successfully with 5 records"; + } catch (const std::exception& e) { + LOG(WARNING) << "Failed to populate kubescape_stix table: " << e.what(); + } +} + } // namespace int main(int argc, char* argv[]) { @@ -431,21 +527,24 @@ int main(int argc, char* argv[]) { std::shared_ptr table; if (use_clickhouse) { - LOG(INFO) << "Starting ClickHouse container..."; - clickhouse_server = - std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), - "clickhouse_carnot", kClickHouseReadyMessage); - - std::vector options = { - absl::Substitute("--publish=$0:$0", kClickHousePort), - "--env=CLICKHOUSE_PASSWORD=test_password", - "--network=host", - }; - - auto status = clickhouse_server->Run(std::chrono::seconds{60}, options, {}, true, - std::chrono::seconds{300}); - if (!status.ok()) { - LOG(FATAL) << "Failed to start ClickHouse container: " << status.msg(); + + if (FLAGS_start_clickhouse) { + LOG(INFO) << "Starting ClickHouse container..."; + clickhouse_server = + std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), + "clickhouse_carnot", kClickHouseReadyMessage); + + std::vector options = { + absl::Substitute("--publish=$0:$0", kClickHousePort), + "--env=CLICKHOUSE_PASSWORD=test_password", + "--network=host", + }; + + auto status = clickhouse_server->Run(std::chrono::seconds{60}, options, {}, true, + std::chrono::seconds{300}); + if (!status.ok()) { + LOG(FATAL) 
<< "Failed to start ClickHouse container: " << status.msg(); + } } // Give ClickHouse time to initialize @@ -526,6 +625,7 @@ int main(int argc, char* argv[]) { schema_query_status.msg()); } PopulateHttpEventsTable(clickhouse_client.get()); + PopulateKubescapeStixTable(clickhouse_client.get()); } else if (table != nullptr) { // Add CSV table to table_store table_store->AddTable(table_name, table); diff --git a/src/carnot/planner/ir/BUILD.bazel b/src/carnot/planner/ir/BUILD.bazel index 3cb11930470..6a064c629f0 100644 --- a/src/carnot/planner/ir/BUILD.bazel +++ b/src/carnot/planner/ir/BUILD.bazel @@ -47,6 +47,7 @@ pl_cc_library( "//src/carnot/planpb:plan_pl_cc_proto", "//src/shared/metadata:cc_library", "//src/shared/metadatapb:metadata_pl_cc_proto", + "@com_github_clickhouse_clickhouse_cpp//:clickhouse_cpp", "@com_github_vinzenz_libpypa//:libpypa", ], ) diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc index 3467285d246..ba4bfde0410 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.cc +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -17,6 +17,9 @@ */ #include "src/carnot/planner/ir/clickhouse_source_ir.h" + +#include + #include "src/carnot/planner/ir/ir.h" namespace px { @@ -66,7 +69,8 @@ Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { pb->set_batch_size(1024); // Set default timestamp and partition columns (can be configured later) - pb->set_timestamp_column("time_"); + // TODO(ddelnano): This needs to be set properly. 
+ pb->set_timestamp_column("event_time"); pb->set_partition_column("hostname"); return Status::OK(); @@ -114,12 +118,148 @@ Status ClickHouseSourceIR::CopyFromNodeImpl(const IRNode* node, return Status::OK(); } +StatusOr ClickHouseSourceIR::ClickHouseTypeToPixieType( + const std::string& ch_type_name) { + // Integer types - Pixie only supports INT64 + if (ch_type_name == "UInt8" || ch_type_name == "UInt16" || ch_type_name == "UInt32" || + ch_type_name == "UInt64" || ch_type_name == "Int8" || ch_type_name == "Int16" || + ch_type_name == "Int32" || ch_type_name == "Int64") { + return types::DataType::INT64; + } + // UInt128 + if (ch_type_name == "UInt128") { + return types::DataType::UINT128; + } + // Floating point types - Pixie only supports FLOAT64 + if (ch_type_name == "Float32" || ch_type_name == "Float64") { + return types::DataType::FLOAT64; + } + // String types + if (ch_type_name == "String" || ch_type_name == "FixedString" || + absl::StartsWith(ch_type_name, "FixedString(")) { + return types::DataType::STRING; + } + // Date/time types + if (ch_type_name == "DateTime" || absl::StartsWith(ch_type_name, "DateTime64")) { + return types::DataType::TIME64NS; + } + // Boolean type (stored as UInt8 in ClickHouse) + if (ch_type_name == "Bool") { + return types::DataType::BOOLEAN; + } + // Default to String for unsupported types + return types::DataType::STRING; +} + +StatusOr ClickHouseSourceIR::InferRelationFromClickHouse( + CompilerState* compiler_state, const std::string& table_name) { + // Check if ClickHouse config is available + auto* ch_config = compiler_state->clickhouse_config(); + PX_UNUSED(ch_config); + // TODO(ddelnano): Add this check in when the configuration plumbing is done. + /* if (ch_config == nullptr) { */ + /* return error::Internal( */ + /* "ClickHouse config not available in compiler state. Cannot infer schema for table '$0'.", */ + /* table_name); */ + /* } */ + + // Set up ClickHouse client options + std::string host = true ? 
"localhost" : ch_config->host(); + int port = true ? 9000 : ch_config->port(); + std::string username = true ? "default" : ch_config->username(); + std::string password = true ? "test_password" : ch_config->password(); + std::string database = true ? "default" : ch_config->database(); + + clickhouse::ClientOptions options; + options.SetHost(host); + options.SetPort(port); + options.SetUser(username); + options.SetPassword(password); + options.SetDefaultDatabase(database); + + // Create ClickHouse client + std::unique_ptr client; + try { + client = std::make_unique(options); + } catch (const std::exception& e) { + return error::Internal("Failed to connect to ClickHouse at $0:$1 - $2", + host, port, e.what()); + } + + // Query ClickHouse for table schema using DESCRIBE TABLE + std::string describe_query = absl::Substitute("DESCRIBE TABLE $0", table_name); + + table_store::schema::Relation relation; + bool query_executed = false; + + try { + client->Select(describe_query, [&](const clickhouse::Block& block) { + query_executed = true; + // DESCRIBE TABLE returns columns: name, type, default_type, default_expression, comment, + // codec_expression, ttl_expression + size_t num_rows = block.GetRowCount(); + + if (num_rows == 0) { + return; + } + + // Get the column name and type columns + auto name_column = block[0]->As(); + auto type_column = block[1]->As(); + + for (size_t i = 0; i < num_rows; ++i) { + std::string col_name = std::string(name_column->At(i)); + std::string col_type = std::string(type_column->At(i)); + + // Convert ClickHouse type to Pixie type + auto pixie_type_or = ClickHouseTypeToPixieType(col_type); + if (!pixie_type_or.ok()) { + LOG(WARNING) << "Failed to convert ClickHouse type '" << col_type + << "' for column '" << col_name << "'. 
Using STRING as fallback."; + relation.AddColumn(types::DataType::STRING, col_name, types::SemanticType::ST_NONE); + } else { + types::DataType pixie_type = pixie_type_or.ConsumeValueOrDie(); + // Determine semantic type based on column name or type + types::SemanticType semantic_type = types::SemanticType::ST_NONE; + if (pixie_type == types::DataType::TIME64NS) { + semantic_type = types::SemanticType::ST_TIME_NS; + } + relation.AddColumn(pixie_type, col_name, semantic_type); + } + } + }); + } catch (const std::exception& e) { + return error::Internal("Failed to query ClickHouse table schema for '$0': $1", + table_name, e.what()); + } + + if (!query_executed || relation.NumColumns() == 0) { + return error::Internal("Table '$0' not found in ClickHouse or has no columns.", table_name); + } + + return relation; +} + Status ClickHouseSourceIR::ResolveType(CompilerState* compiler_state) { + table_store::schema::Relation table_relation; + + auto existing_relation = false; auto relation_it = compiler_state->relation_map()->find(table_name()); if (relation_it == compiler_state->relation_map()->end()) { - return CreateIRNodeError("Table '$0' not found.", table_name_); + // Table not found in relation_map, try to infer from ClickHouse + LOG(INFO) << absl::Substitute("Table '$0' not found in relation_map. 
Attempting to infer schema from ClickHouse...", table_name()); + + auto relation_or = InferRelationFromClickHouse(compiler_state, table_name()); + if (!relation_or.ok()) { + return CreateIRNodeError("Table '$0' not found in relation_map and failed to infer from ClickHouse: $1", + table_name_, relation_or.status().msg()); + } + + table_relation = relation_or.ConsumeValueOrDie(); + } else { + table_relation = relation_it->second; + existing_relation = true; } - auto table_relation = relation_it->second; auto full_table_type = TableType::Create(table_relation); if (select_all()) { // For select_all, add all table columns plus ClickHouse-added columns (hostname, event_time) @@ -132,11 +272,13 @@ Status ClickHouseSourceIR::ResolveType(CompilerState* compiler_state) { } // Add ClickHouse-added columns - full_table_type->AddColumn("hostname", ValueType::Create(types::DataType::STRING, types::SemanticType::ST_NONE)); - column_indices.push_back(table_column_count); // hostname is after all table columns + if (existing_relation) { + full_table_type->AddColumn("hostname", ValueType::Create(types::DataType::STRING, types::SemanticType::ST_NONE)); + column_indices.push_back(table_column_count); // hostname is after all table columns - full_table_type->AddColumn("event_time", ValueType::Create(types::DataType::TIME64NS, types::SemanticType::ST_TIME_NS)); - column_indices.push_back(table_column_count + 1); // event_time is after hostname + full_table_type->AddColumn("event_time", ValueType::Create(types::DataType::TIME64NS, types::SemanticType::ST_TIME_NS)); + column_indices.push_back(table_column_count + 1); // event_time is after hostname + } SetColumnIndexMap(column_indices); return SetResolvedType(full_table_type); diff --git a/src/carnot/planner/ir/clickhouse_source_ir.h b/src/carnot/planner/ir/clickhouse_source_ir.h index 6b793196a31..988586211cc 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.h +++ b/src/carnot/planner/ir/clickhouse_source_ir.h @@ -29,6 +29,7 @@ 
#include "src/carnot/planner/types/types.h" #include "src/common/base/base.h" #include "src/shared/types/types.h" +#include "src/table_store/schema/relation.h" namespace px { namespace carnot { @@ -89,6 +90,13 @@ class ClickHouseSourceIR : public OperatorIR { Status ResolveType(CompilerState* compiler_state); protected: + // Helper method to query ClickHouse for table schema and create a Relation + StatusOr InferRelationFromClickHouse( + CompilerState* compiler_state, const std::string& table_name); + + // Helper method to convert ClickHouse type string to Pixie DataType + static StatusOr ClickHouseTypeToPixieType(const std::string& ch_type_name); + StatusOr> PruneOutputColumnsToImpl( const absl::flat_hash_set& output_colnames) override; diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index 9aba73495f8..54379ff296c 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -1142,18 +1142,20 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF("port", "ClickHouse server port", 9000), UDTFArg::Make("username", "ClickHouse username", "'default'"), UDTFArg::Make("password", "ClickHouse password", "'test_password'"), - UDTFArg::Make("database", "ClickHouse database", "'default'")); + UDTFArg::Make("database", "ClickHouse database", "'default'"), + UDTFArg::Make("use_if_not_exists", "Whether to use IF NOT EXISTS in CREATE TABLE statements", true)); } Status Init(FunctionContext*, types::StringValue host, types::Int64Value port, types::StringValue username, types::StringValue password, - types::StringValue database) { + types::StringValue database, types::BoolValue use_if_not_exists) { // Store ClickHouse connection parameters host_ = std::string(host); port_ = port.val; username_ = std::string(username); password_ = std::string(password); database_ = std::string(database); + use_if_not_exists_ = use_if_not_exists.val; // Fetch schemas from MDS 
px::vizier::services::metadata::SchemaRequest req; @@ -1206,12 +1208,14 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTFExecute(absl::Substitute("DROP TABLE IF EXISTS $0", table_name)); + // Drop existing table if not using IF NOT EXISTS + if (!use_if_not_exists_) { + clickhouse_client_->Execute(absl::Substitute("DROP TABLE IF EXISTS $0", table_name)); + } // Create new table clickhouse_client_->Execute(create_table_sql); @@ -1261,7 +1265,8 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF column_defs; // Add columns from schema @@ -1285,13 +1290,14 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF Date: Thu, 5 Jun 2025 18:28:47 +0200 Subject: [PATCH 73/86] docu: adding how to remove the hsts blocker for chrome and the syntax for socks proxy in gcloud ssh Signed-off-by: entlein --- src/ui/README.md | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/src/ui/README.md b/src/ui/README.md index 088a1714bb6..5e09be041b8 100644 --- a/src/ui/README.md +++ b/src/ui/README.md @@ -2,7 +2,7 @@ ## Export environment variables for webpack ``` -export PL_GATEWAY_URL="https://$(dig +short prod.withpixie.ai @8.8.8.8)" +export PL_GATEWAY_URL="https://$(dig +short work.getcosmic.ai @8.8.8.8)" export PL_BUILD_TYPE=prod export SELFSIGN_CERT_FILE="$HOME/.prod.cert" export SELFSIGN_CERT_KEY="$HOME/.prod.key" @@ -16,13 +16,13 @@ mkcert -install mkcert \ -cert-file $SELFSIGN_CERT_FILE \ -key-file $SELFSIGN_CERT_KEY \ - prod.withpixie.ai "*.prod.withpixie.ai" localhost 127.0.0.1 ::1 + work.getcosmic.ai "*.work.getcosmic.ai" localhost 127.0.0.1 ::1 ``` ## Add the following domain to /etc/hosts, or /private/etc/hosts for Mac Replace site-name with your test site name. 
``` -127.0.0.1 prod.withpixie.ai .prod.withpixie.ai id.prod.withpixie.ai +127.0.0.1 work.getcosmic.ai test.work.getcosmic.ai id.work.getcosmic.ai ``` ## Run the webpack devserver @@ -31,8 +31,27 @@ cd src/ui yarn install yarn dev ``` +This will expose the UI locally at 8080 ## Access the frontend on the browser -Navigate to https://prod.withpixie.ai:8080/ -Note the https and port. If you are not logged in, log in at work.withpixie.ai because -as of writing this, auth0 doesn't accept callbacks to prod.withpixie.ai:8080 +Navigate to https://work.getcosmic.ai:8080/ +Note the https and port. If you are not logged in, log in at work.getcosmic.ai because +as of writing this, auth0 doesn't accept callbacks to work.getcosmic.ai:8080 + +## Note if you are tunneling or get HSTS exceptions +(please do this at your own risk) +in Chrome, navigate to +chrome://net-internals/#hsts and delete the HSTS rules for work.getcosmic.ai + +This will then unblock the security feature for this domain. Please ensure to remove this once you are done. + + +## For a remote VM +### openSSH client +ssh -i privkey user@IP -D 8080 + +### gcloud +export instancename="instance-pixie-dev" +export project="gcp-project-uuid" +export zone="europe-west1-d" +gcloud compute ssh $instancename --zone $zone --project $project -- -NL 8080:localhost:8080 \ No newline at end of file From 3492af4f4482310c6d46887d025c7a139a479590 Mon Sep 17 00:00:00 2001 From: Duck <70207455+entlein@users.noreply.github.com> Date: Thu, 5 Jun 2025 18:30:23 +0200 Subject: [PATCH 74/86] Update README.md Signed-off-by: Duck <70207455+entlein@users.noreply.github.com> --- src/ui/README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/ui/README.md b/src/ui/README.md index 5e09be041b8..09cdee3d741 100644 --- a/src/ui/README.md +++ b/src/ui/README.md @@ -48,10 +48,13 @@ This will then unblock the security feature for this domain. 
Please ensure to re ## For a remote VM ### openSSH client +``` ssh -i privkey user@IP -D 8080 - +``` ### gcloud +``` export instancename="instance-pixie-dev" export project="gcp-project-uuid" export zone="europe-west1-d" -gcloud compute ssh $instancename --zone $zone --project $project -- -NL 8080:localhost:8080 \ No newline at end of file +gcloud compute ssh $instancename --zone $zone --project $project -- -NL 8080:localhost:8080 +``` From a4239f34dc1c52ba072149c6c277484691b7b1df Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Thu, 30 Oct 2025 08:04:19 +0000 Subject: [PATCH 75/86] Remove hard coded clickhouse details in favor of DataFrame clickhouse_dsn and clickhouse_ts_col kwargs Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 6 +- .../exec/clickhouse_export_sink_node_test.cc | 17 ++++ .../exec/clickhouse_source_node_test.cc | 17 ++++ src/carnot/planner/ir/clickhouse_source_ir.cc | 57 ++++++----- src/carnot/planner/ir/clickhouse_source_ir.h | 27 +++++- src/carnot/planner/logical_planner_test.cc | 2 +- src/carnot/planner/objects/dataframe.cc | 94 ++++++++++++++++++- 7 files changed, 182 insertions(+), 38 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index f8ffb4def19..212fa5a43d6 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -45,10 +45,12 @@ // Example clickhouse test usage: // The records inserted into clickhouse exist between -10m and -5m -// bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --query="import px;df = px.DataFrame('http_events', clickhouse=True, start_time='-10m', end_time='-9m'); px.display(df)" --output_file=$(pwd)/output.csv +// bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --query="import px;df = px.DataFrame('http_events', clickhouse_dsn='default:test_password@localhost:9000/default', start_time='-10m', end_time='-9m'); 
px.display(df)" --output_file=$(pwd)/output.csv // // Testing existing ClickHouse table (kubescape_stix) table population and query: -// bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --start_clickhouse=false --query="import px;df = px.DataFrame('kubescape_stix', clickhouse=True, start_time='-10m'); px.display(df)" --output_file=$(pwd)/output.csv +// docker run -p 9000:9000 --network=host --env=CLICKHOUSE_PASSWORD=test_password clickhouse/clickhouse-server:25.7-alpine +// Create clickhouse table +// bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --start_clickhouse=false --query="import px;df = px.DataFrame('kubescape_stix', clickhouse_dsn='default:test_password@localhost:9000/default', start_time='-10m'); px.display(df)" --output_file=$(pwd)/output.csv DEFINE_string(input_file, gflags::StringFromEnv("INPUT_FILE", ""), diff --git a/src/carnot/exec/clickhouse_export_sink_node_test.cc b/src/carnot/exec/clickhouse_export_sink_node_test.cc index baea2521108..6d567380e1a 100644 --- a/src/carnot/exec/clickhouse_export_sink_node_test.cc +++ b/src/carnot/exec/clickhouse_export_sink_node_test.cc @@ -36,8 +36,10 @@ #include "src/carnot/plan/operators.h" #include "src/carnot/planpb/plan.pb.h" #include "src/carnot/udf/registry.h" +#include "src/common/event/time_system.h" #include "src/common/testing/test_utils/container_runner.h" #include "src/common/testing/testing.h" +#include "src/shared/metadata/metadata_state.h" #include "src/shared/types/arrow_adapter.h" #include "src/shared/types/column_wrapper.h" #include "src/shared/types/types.h" @@ -65,6 +67,20 @@ class ClickHouseExportSinkNodeTest : public ::testing::Test { func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + // Create a minimal agent metadata state for test execution + auto metadata_state = 
std::make_shared( + "test_host", // hostname + 1, // asid + getpid(), // pid + 0, // start_time + sole::uuid4(), // agent_id + "", // pod_name + sole::uuid4(), // vizier_id + "test_vizier", // vizier_name + "", // vizier_namespace + time_system_.get()); // time_system + exec_state_->set_metadata_state(metadata_state); + // Start ClickHouse container clickhouse_server_ = std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), @@ -224,6 +240,7 @@ class ClickHouseExportSinkNodeTest : public ::testing::Test { std::unique_ptr client_; std::unique_ptr exec_state_; std::unique_ptr func_registry_; + std::unique_ptr time_system_ = std::make_unique(); }; TEST_F(ClickHouseExportSinkNodeTest, BasicExport) { diff --git a/src/carnot/exec/clickhouse_source_node_test.cc b/src/carnot/exec/clickhouse_source_node_test.cc index ce0f6e7757a..18b4897b477 100644 --- a/src/carnot/exec/clickhouse_source_node_test.cc +++ b/src/carnot/exec/clickhouse_source_node_test.cc @@ -36,8 +36,10 @@ #include "src/carnot/planpb/plan.pb.h" #include "src/carnot/planpb/test_proto.h" #include "src/carnot/udf/registry.h" +#include "src/common/event/time_system.h" #include "src/common/testing/test_utils/container_runner.h" #include "src/common/testing/testing.h" +#include "src/shared/metadata/metadata_state.h" #include "src/shared/types/arrow_adapter.h" #include "src/shared/types/column_wrapper.h" #include "src/shared/types/types.h" @@ -67,6 +69,20 @@ class ClickHouseSourceNodeTest : public ::testing::Test { func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); + // Create a minimal agent metadata state for test execution + auto metadata_state = std::make_shared( + "test_host", // hostname + 1, // asid + getpid(), // pid + 0, // start_time + sole::uuid4(), // agent_id + "", // pod_name + sole::uuid4(), // vizier_id + "test_vizier", // vizier_name + "", // vizier_namespace + time_system_.get()); 
// time_system + exec_state_->set_metadata_state(metadata_state); + // Start ClickHouse container clickhouse_server_ = std::make_unique(px::testing::BazelRunfilePath(kClickHouseImage), @@ -184,6 +200,7 @@ class ClickHouseSourceNodeTest : public ::testing::Test { std::unique_ptr client_; std::unique_ptr exec_state_; std::unique_ptr func_registry_; + std::unique_ptr time_system_ = std::make_unique(); }; TEST_F(ClickHouseSourceNodeTest, BasicQuery) { diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc index ba4bfde0410..2ada5b51ad4 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.cc +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -34,12 +34,12 @@ Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { auto pb = op->mutable_clickhouse_source_op(); op->set_op_type(planpb::CLICKHOUSE_SOURCE_OPERATOR); - // TODO(ddelnano): Set ClickHouse connection parameters from config - pb->set_host("localhost"); - pb->set_port(9000); - pb->set_username("default"); - pb->set_password("test_password"); - pb->set_database("default"); + // Set ClickHouse connection parameters from stored values + pb->set_host(host_); + pb->set_port(port_); + pb->set_username(username_); + pb->set_password(password_); + pb->set_database(database_); // Build the query pb->set_query(absl::Substitute("SELECT * FROM $0", table_name_)); @@ -68,18 +68,27 @@ Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { // Set batch size pb->set_batch_size(1024); - // Set default timestamp and partition columns (can be configured later) - // TODO(ddelnano): This needs to be set properly. 
- pb->set_timestamp_column("event_time"); + // Set timestamp and partition columns from stored values + pb->set_timestamp_column(timestamp_column_); pb->set_partition_column("hostname"); return Status::OK(); } Status ClickHouseSourceIR::Init(const std::string& table_name, - const std::vector& select_columns) { + const std::vector& select_columns, + const std::string& host, int port, + const std::string& username, const std::string& password, + const std::string& database, + const std::string& timestamp_column) { table_name_ = table_name; column_names_ = select_columns; + host_ = host; + port_ = port; + username_ = username; + password_ = password; + database_ = database; + timestamp_column_ = timestamp_column; return Status::OK(); } @@ -154,28 +163,18 @@ StatusOr ClickHouseSourceIR::ClickHouseTypeToPixieType( StatusOr ClickHouseSourceIR::InferRelationFromClickHouse( CompilerState* compiler_state, const std::string& table_name) { // Check if ClickHouse config is available + // TODO(ddelnano): Add this check in when the configuration plumbing is done. auto* ch_config = compiler_state->clickhouse_config(); PX_UNUSED(ch_config); - // TODO(ddelnano): Add this check in when the configuration plumbing is done. - /* if (ch_config == nullptr) { */ - /* return error::Internal( */ - /* "ClickHouse config not available in compiler state. Cannot infer schema for table '$0'.", */ - /* table_name); */ - /* } */ - - // Set up ClickHouse client options - std::string host = true ? "localhost" : ch_config->host(); - int port = true ? 9000 : ch_config->port(); - std::string username = true ? "default" : ch_config->username(); - std::string password = true ? "test_password" : ch_config->password(); - std::string database = true ? 
"default" : ch_config->database(); + + // Use stored connection parameters from Init() clickhouse::ClientOptions options; - options.SetHost(host); - options.SetPort(port); - options.SetUser(username); - options.SetPassword(password); - options.SetDefaultDatabase(database); + options.SetHost(host_); + options.SetPort(port_); + options.SetUser(username_); + options.SetPassword(password_); + options.SetDefaultDatabase(database_); // Create ClickHouse client std::unique_ptr client; @@ -183,7 +182,7 @@ StatusOr ClickHouseSourceIR::InferRelationFromCli client = std::make_unique(options); } catch (const std::exception& e) { return error::Internal("Failed to connect to ClickHouse at $0:$1 - $2", - host, port, e.what()); + host_, port_, e.what()); } // Query ClickHouse for table schema using DESCRIBE TABLE diff --git a/src/carnot/planner/ir/clickhouse_source_ir.h b/src/carnot/planner/ir/clickhouse_source_ir.h index 988586211cc..1f578e7bcef 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.h +++ b/src/carnot/planner/ir/clickhouse_source_ir.h @@ -48,11 +48,26 @@ class ClickHouseSourceIR : public OperatorIR { * * @param table_name the table to load. * @param select_columns the columns to select. If vector is empty, then select all columns. + * @param host the ClickHouse server host. + * @param port the ClickHouse server port. + * @param username the ClickHouse username. + * @param password the ClickHouse password. + * @param database the ClickHouse database. 
* @return Status */ - Status Init(const std::string& table_name, const std::vector& select_columns); + Status Init(const std::string& table_name, const std::vector& select_columns, + const std::string& host = "localhost", int port = 9000, + const std::string& username = "default", const std::string& password = "", + const std::string& database = "default", + const std::string& timestamp_column = "event_time"); std::string table_name() const { return table_name_; } + std::string host() const { return host_; } + int port() const { return port_; } + std::string username() const { return username_; } + std::string password() const { return password_; } + std::string database() const { return database_; } + std::string timestamp_column() const { return timestamp_column_; } void SetTimeStartNS(int64_t time_start_ns) { time_start_ns_ = time_start_ns; } void SetTimeStopNS(int64_t time_stop_ns) { time_stop_ns_ = time_stop_ns; } @@ -103,6 +118,16 @@ class ClickHouseSourceIR : public OperatorIR { private: std::string table_name_; + // ClickHouse connection parameters + std::string host_ = "localhost"; + int port_ = 9000; + std::string username_ = "default"; + std::string password_ = ""; + std::string database_ = "default"; + + // ClickHouse column configuration + std::string timestamp_column_ = "event_time"; + std::optional time_start_ns_; std::optional time_stop_ns_; diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index 7e7fd7747a1..7f81d44dc9a 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -1049,7 +1049,7 @@ constexpr char kClickHouseSourceQuery[] = R"pxl( import px # Test ClickHouse source node functionality -df = px.DataFrame('http_events', start_time='-10m', end_time='-5m', clickhouse=True) +df = px.DataFrame('http_events', start_time='-10m', end_time='-5m', clickhouse_dsn='default:test_password@localhost:9000/default') df = df['time_', 'req_headers'] 
px.display(df, 'clickhouse_data') )pxl"; diff --git a/src/carnot/planner/objects/dataframe.cc b/src/carnot/planner/objects/dataframe.cc index 1fc5066e328..6392863e707 100644 --- a/src/carnot/planner/objects/dataframe.cc +++ b/src/carnot/planner/objects/dataframe.cc @@ -32,11 +32,80 @@ #include "src/carnot/planner/objects/time.h" #include "src/common/base/statusor.h" +#include +#include + namespace px { namespace carnot { namespace planner { namespace compiler { +struct ClickHouseDSN { + std::string host = "localhost"; + int port = 9000; + std::string username = "default"; + std::string password = ""; + std::string database = "default"; +}; + +/** + * @brief Parse a ClickHouse DSN string + * + * Supports formats: + * clickhouse://user:password@host:port/database + * user:password@host:port/database + * host:port + * host + */ +StatusOr ParseClickHouseDSN(const std::string& dsn_str) { + ClickHouseDSN dsn; + std::string remaining = dsn_str; + + // Strip clickhouse:// prefix if present + if (absl::StartsWith(remaining, "clickhouse://")) { + remaining = remaining.substr(13); + } + + // Parse user:password@ if present + size_t at_pos = remaining.find('@'); + if (at_pos != std::string::npos) { + std::string auth_part = remaining.substr(0, at_pos); + remaining = remaining.substr(at_pos + 1); + + size_t colon_pos = auth_part.find(':'); + if (colon_pos != std::string::npos) { + dsn.username = auth_part.substr(0, colon_pos); + dsn.password = auth_part.substr(colon_pos + 1); + } else { + dsn.username = auth_part; + } + } + + // Parse host:port/database + size_t slash_pos = remaining.find('/'); + std::string host_port; + if (slash_pos != std::string::npos) { + host_port = remaining.substr(0, slash_pos); + dsn.database = remaining.substr(slash_pos + 1); + } else { + host_port = remaining; + } + + // Parse host:port + size_t colon_pos = host_port.find(':'); + if (colon_pos != std::string::npos) { + dsn.host = host_port.substr(0, colon_pos); + std::string port_str = 
host_port.substr(colon_pos + 1); + if (!absl::SimpleAtoi(port_str, &dsn.port)) { + return error::InvalidArgument("Invalid port in ClickHouse DSN: $0", port_str); + } + } else if (!host_port.empty()) { + dsn.host = host_port; + } + + return dsn; +} + StatusOr> GetAsDataFrame(QLObjectPtr obj) { if (!Dataframe::IsDataframe(obj)) { return obj->CreateError("Expected DataFrame, received $0", obj->name()); @@ -119,8 +188,20 @@ StatusOr DataFrameConstructor(CompilerState* compiler_state, IR* gr std::string table_name = table->str(); // Check if we should use ClickHouse or memory source - PX_ASSIGN_OR_RETURN(BoolIR * use_clickhouse, GetArgAs(ast, args, "clickhouse")); - bool is_clickhouse = use_clickhouse->val(); + bool is_clickhouse = false; + ClickHouseDSN dsn; + std::string timestamp_column = "event_time"; + if (!NoneObject::IsNoneObject(args.GetArg("clickhouse_dsn"))) { + is_clickhouse = true; + PX_ASSIGN_OR_RETURN(StringIR * dsn_ir, GetArgAs(ast, args, "clickhouse_dsn")); + PX_ASSIGN_OR_RETURN(dsn, ParseClickHouseDSN(dsn_ir->str())); + + // Get timestamp column if specified + if (!NoneObject::IsNoneObject(args.GetArg("clickhouse_ts_col"))) { + PX_ASSIGN_OR_RETURN(StringIR * ts_col_ir, GetArgAs(ast, args, "clickhouse_ts_col")); + timestamp_column = ts_col_ir->str(); + } + } if (is_clickhouse) { // Create ClickHouseSourceIR @@ -141,7 +222,10 @@ StatusOr DataFrameConstructor(CompilerState* compiler_state, IR* gr // If columns is empty, select_all() will be true and ResolveType will handle adding all columns PX_ASSIGN_OR_RETURN(ClickHouseSourceIR * clickhouse_source_op, - graph->CreateNode(ast, table_name, clickhouse_columns)); + graph->CreateNode(ast, table_name, clickhouse_columns, + dsn.host, dsn.port, dsn.username, + dsn.password, dsn.database, + timestamp_column)); if (!NoneObject::IsNoneObject(args.GetArg("start_time"))) { PX_ASSIGN_OR_RETURN(ExpressionIR * start_time, @@ -475,8 +559,8 @@ Status Dataframe::Init() { PX_ASSIGN_OR_RETURN( std::shared_ptr constructor_fn, 
FuncObject::Create( - name(), {"table", "select", "start_time", "end_time", "clickhouse"}, - {{"select", "[]"}, {"start_time", "None"}, {"end_time", "None"}, {"clickhouse", "False"}}, + name(), {"table", "select", "start_time", "end_time", "clickhouse_dsn", "clickhouse_ts_col"}, + {{"select", "[]"}, {"start_time", "None"}, {"end_time", "None"}, {"clickhouse_dsn", "None"}, {"clickhouse_ts_col", "None"}}, /* has_variable_len_args */ false, /* has_variable_len_kwargs */ false, std::bind(&DataFrameConstructor, compiler_state_, graph(), std::placeholders::_1, From ae967ffaf9e7f2f1793de565b36bb403102b221c Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 2 Nov 2025 22:19:35 +0000 Subject: [PATCH 76/86] Add test to verify that ClickHouseSourceNodeIR correctly keeps non default values Signed-off-by: Dom Del Nano --- src/carnot/plan/operators.cc | 12 +++++++++++- src/carnot/planner/ir/clickhouse_source_ir.cc | 6 ++++++ src/carnot/planner/logical_planner_test.cc | 7 ++++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/carnot/plan/operators.cc b/src/carnot/plan/operators.cc index ce4883db10c..488671e7a97 100644 --- a/src/carnot/plan/operators.cc +++ b/src/carnot/plan/operators.cc @@ -734,7 +734,17 @@ StatusOr EmptySourceOperator::OutputRelation( */ std::string ClickHouseSourceOperator::DebugString() const { - return absl::Substitute("Op:ClickHouseSource(query=$0)", pb_.query()); + return absl::Substitute(R"(Op:ClickHouseSource( + host=$0 + port=$1 + username=$2 + batch_size=$3 + start_time=$4 + end_time=$5 + timestamp_column=$6 + partition_column=$7 +)", pb_.host(), pb_.port(), pb_.username(), pb_.batch_size(), pb_.start_time(), pb_.end_time(), + pb_.timestamp_column(), pb_.partition_column()); } Status ClickHouseSourceOperator::Init(const planpb::ClickHouseSourceOperator& pb) { diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc index 2ada5b51ad4..94f46d5ad68 100644 --- 
a/src/carnot/planner/ir/clickhouse_source_ir.cc +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -124,6 +124,12 @@ Status ClickHouseSourceIR::CopyFromNodeImpl(const IRNode* node, column_index_map_set_ = source_ir->column_index_map_set_; column_index_map_ = source_ir->column_index_map_; + username_ = source_ir->username_; + password_ = source_ir->password_; + database_ = source_ir->database_; + port_ = source_ir->port_; + host_ = source_ir->host_; + return Status::OK(); } diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index 7f81d44dc9a..646f36f88d3 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -1049,7 +1049,7 @@ constexpr char kClickHouseSourceQuery[] = R"pxl( import px # Test ClickHouse source node functionality -df = px.DataFrame('http_events', start_time='-10m', end_time='-5m', clickhouse_dsn='default:test_password@localhost:9000/default') +df = px.DataFrame('http_events', start_time='-10m', end_time='-5m', clickhouse_dsn='user:test@clickhouse-server:9000/pixie') df = df['time_', 'req_headers'] px.display(df, 'clickhouse_data') )pxl"; @@ -1073,6 +1073,11 @@ TEST_F(LogicalPlannerTest, ClickHouseSourceNode) { for (const auto& planFragment : agent_plan.nodes()) { for (const auto& planNode : planFragment.nodes()) { if (planNode.op().op_type() == planpb::OperatorType::CLICKHOUSE_SOURCE_OPERATOR) { + EXPECT_THAT(planNode.op().clickhouse_source_op().host(), "clickhouse-server"); + EXPECT_THAT(planNode.op().clickhouse_source_op().port(), 9000); + EXPECT_THAT(planNode.op().clickhouse_source_op().database(), "pixie"); + EXPECT_THAT(planNode.op().clickhouse_source_op().username(), "user"); + EXPECT_THAT(planNode.op().clickhouse_source_op().password(), "test"); has_clickhouse_source = true; break; } From 60edeeb9fffa260cca97804e63599ba7d862d5c0 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 2 Nov 2025 23:42:11 +0000 Subject: [PATCH 77/86] Fix bug 
where column indicies were mismatched with child Map operators by ensuring ClickHouseSourceNode specifies cols specifically Signed-off-by: Dom Del Nano --- src/carnot/carnot_executable.cc | 17 ++++++++++++++++- src/carnot/planner/ir/clickhouse_source_ir.cc | 13 ++++++++----- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/src/carnot/carnot_executable.cc b/src/carnot/carnot_executable.cc index 212fa5a43d6..4f19570e782 100644 --- a/src/carnot/carnot_executable.cc +++ b/src/carnot/carnot_executable.cc @@ -47,9 +47,24 @@ // The records inserted into clickhouse exist between -10m and -5m // bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true --query="import px;df = px.DataFrame('http_events', clickhouse_dsn='default:test_password@localhost:9000/default', start_time='-10m', end_time='-9m'); px.display(df)" --output_file=$(pwd)/output.csv // +// +// Test that verifies bug with Map operators isn't introduced +// bazel run -c dbg src/carnot:carnot_executable -- -v=1 --vmodule=clickhouse_source_node=1 --use_clickhouse=true --query="import px;df = px.DataFrame('http_events', clickhouse_dsn='default:test_password@localhost:9000/default', start_time='-10m', end_time='-9m'); df.time_ = df.event_time; df = df[['time_', 'req_path']]; px.display(df)" --output_file=$(pwd)/output.csv +// +// // Testing existing ClickHouse table (kubescape_stix) table population and query: // docker run -p 9000:9000 --network=host --env=CLICKHOUSE_PASSWORD=test_password clickhouse/clickhouse-server:25.7-alpine -// Create clickhouse table +// CREATE TABLE IF NOT EXISTS default.kubescape_stix ( +// timestamp String, +// pod_name String, +// namespace String, +// data String, +// hostname String, +// event_time DateTime64(3) +//) ENGINE = MergeTree() +//PARTITION BY toYYYYMM(event_time) +//ORDER BY (hostname, event_time); + // bazel run -c dbg src/carnot:carnot_executable -- --vmodule=clickhouse_source_node=1 --use_clickhouse=true 
--start_clickhouse=false --query="import px;df = px.DataFrame('kubescape_stix', clickhouse_dsn='default:test_password@localhost:9000/default', start_time='-10m'); px.display(df)" --output_file=$(pwd)/output.csv diff --git a/src/carnot/planner/ir/clickhouse_source_ir.cc b/src/carnot/planner/ir/clickhouse_source_ir.cc index 94f46d5ad68..9d6aba8dfc1 100644 --- a/src/carnot/planner/ir/clickhouse_source_ir.cc +++ b/src/carnot/planner/ir/clickhouse_source_ir.cc @@ -41,22 +41,26 @@ Status ClickHouseSourceIR::ToProto(planpb::Operator* op) const { pb->set_password(password_); pb->set_database(database_); - // Build the query - pb->set_query(absl::Substitute("SELECT * FROM $0", table_name_)); - if (!column_index_map_set()) { return error::InvalidArgument("ClickHouseSource columns are not set."); } DCHECK(is_type_resolved()); DCHECK_EQ(column_index_map_.size(), resolved_table_type()->ColumnNames().size()); + + // Build the query with explicit column list to match output_descriptor_ order + std::vector column_list; for (const auto& [idx, col_name] : Enumerate(resolved_table_type()->ColumnNames())) { + column_list.push_back(col_name); pb->add_column_names(col_name); auto val_type = std::static_pointer_cast( resolved_table_type()->GetColumnType(col_name).ConsumeValueOrDie()); pb->add_column_types(val_type->data_type()); } + // Generate SELECT with explicit columns instead of SELECT * to ensure correct column ordering + pb->set_query(absl::Substitute("SELECT $0 FROM $1", absl::StrJoin(column_list, ", "), table_name_)); + if (IsTimeStartSet()) { pb->set_start_time(time_start_ns()); } @@ -162,7 +166,6 @@ StatusOr ClickHouseSourceIR::ClickHouseTypeToPixieType( if (ch_type_name == "Bool") { return types::DataType::BOOLEAN; } - // Default to String for unsupported types return types::DataType::STRING; } @@ -252,7 +255,7 @@ Status ClickHouseSourceIR::ResolveType(CompilerState* compiler_state) { auto relation_it = compiler_state->relation_map()->find(table_name()); if (relation_it == 
compiler_state->relation_map()->end()) { // Table not found in relation_map, try to infer from ClickHouse - LOG(INFO) << absl::Substitute("Table '$0' not found in relation_map. Attempting to infer schema from ClickHouse...", table_name()); + VLOG(1) << absl::Substitute("Table '$0' not found in relation_map. Attempting to infer schema from ClickHouse...", table_name()); auto relation_or = InferRelationFromClickHouse(compiler_state, table_name()); if (!relation_or.ok()) { From f35be2d0ef4e967946410e0fa977faa9fbf6568b Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 9 Nov 2025 18:33:21 +0000 Subject: [PATCH 78/86] Fix clickhouse export sink bugs. Use compiler state for clickhouse dsn Signed-off-by: Dom Del Nano --- src/carnot/carnot.cc | 1 + .../exec/clickhouse_export_sink_node.cc | 13 ++ .../exec/clickhouse_export_sink_node_test.cc | 116 ++++++++++++++++ .../planner/ir/clickhouse_export_sink_ir.cc | 54 ++++++-- .../planner/ir/clickhouse_export_sink_ir.h | 12 +- .../ir/clickhouse_export_sink_ir_test.cc | 6 +- src/carnot/planner/ir/pattern_match.h | 6 +- src/carnot/planner/logical_planner_test.cc | 126 ++++++++++++++++++ src/carnot/planner/objects/otel.cc | 33 ++++- src/carnot/planner/objects/otel.h | 2 + 10 files changed, 345 insertions(+), 24 deletions(-) diff --git a/src/carnot/carnot.cc b/src/carnot/carnot.cc index bae8ee50831..ff55ff0ec15 100644 --- a/src/carnot/carnot.cc +++ b/src/carnot/carnot.cc @@ -182,6 +182,7 @@ Status CarnotImpl::RegisterUDFsInPlanFragment(exec::ExecState* exec_state, plan: .OnEmptySource(no_op) .OnOTelSink(no_op) .OnClickHouseSource(no_op) + .OnClickHouseExportSink(no_op) .Walk(pf); } diff --git a/src/carnot/exec/clickhouse_export_sink_node.cc b/src/carnot/exec/clickhouse_export_sink_node.cc index 73b23e58be1..6a11a42d37a 100644 --- a/src/carnot/exec/clickhouse_export_sink_node.cc +++ b/src/carnot/exec/clickhouse_export_sink_node.cc @@ -23,6 +23,8 @@ #include #include +#include +#include #include "glog/logging.h" #include 
"src/carnot/planpb/plan.pb.h" #include "src/common/base/macros.h" @@ -145,6 +147,17 @@ Status ClickHouseExportSinkNode::ConsumeNextImpl(ExecState* /*exec_state*/, cons block.AppendColumn(mapping.clickhouse_column_name(), col); break; } + case types::UINT128: { + // UINT128 is exported as STRING (UUID format) + auto col = std::make_shared(); + for (int64_t i = 0; i < num_rows; ++i) { + auto val = types::GetValueFromArrowArray(arrow_col.get(), i); + std::string uuid_str = sole::rebuild(absl::Uint128High64(val), absl::Uint128Low64(val)).str(); + col->Append(uuid_str); + } + block.AppendColumn(mapping.clickhouse_column_name(), col); + break; + } default: return error::InvalidArgument("Unsupported data type for ClickHouse export: $0", types::ToString(mapping.column_type())); diff --git a/src/carnot/exec/clickhouse_export_sink_node_test.cc b/src/carnot/exec/clickhouse_export_sink_node_test.cc index 6d567380e1a..75913be408c 100644 --- a/src/carnot/exec/clickhouse_export_sink_node_test.cc +++ b/src/carnot/exec/clickhouse_export_sink_node_test.cc @@ -363,6 +363,122 @@ TEST_F(ClickHouseExportSinkNodeTest, MultipleBatches) { } } +TEST_F(ClickHouseExportSinkNodeTest, UINT128Export) { + const std::string table_name = "export_test_uint128"; + + // Create table with String column for UUID + try { + client_->Execute(absl::Substitute("DROP TABLE IF EXISTS $0", table_name)); + + client_->Execute(absl::Substitute(R"( + CREATE TABLE $0 ( + time_ DateTime64(9), + upid String, + hostname String, + value Int64 + ) ENGINE = MergeTree() + ORDER BY time_ + )", table_name)); + + LOG(INFO) << "UINT128 export table created successfully: " << table_name; + } catch (const std::exception& e) { + LOG(ERROR) << "Failed to create UINT128 export table: " << e.what(); + throw; + } + + // Create plan node for UINT128 test + planpb::Operator op; + op.set_op_type(planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR); + auto* ch_op = op.mutable_clickhouse_sink_op(); + + auto* config = 
ch_op->mutable_clickhouse_config(); + config->set_host("localhost"); + config->set_port(kClickHousePort); + config->set_username("default"); + config->set_password("test_password"); + config->set_database("default"); + + ch_op->set_table_name(table_name); + + // Add column mappings + auto* mapping0 = ch_op->add_column_mappings(); + mapping0->set_input_column_index(0); + mapping0->set_clickhouse_column_name("time_"); + mapping0->set_column_type(types::TIME64NS); + + auto* mapping1 = ch_op->add_column_mappings(); + mapping1->set_input_column_index(1); + mapping1->set_clickhouse_column_name("upid"); + mapping1->set_column_type(types::UINT128); + + auto* mapping2 = ch_op->add_column_mappings(); + mapping2->set_input_column_index(2); + mapping2->set_clickhouse_column_name("hostname"); + mapping2->set_column_type(types::STRING); + + auto* mapping3 = ch_op->add_column_mappings(); + mapping3->set_input_column_index(3); + mapping3->set_clickhouse_column_name("value"); + mapping3->set_column_type(types::INT64); + + auto plan_node = std::make_unique(1); + EXPECT_OK(plan_node->Init(op.clickhouse_sink_op())); + + // Define input schema + RowDescriptor input_rd({types::TIME64NS, types::UINT128, types::STRING, types::INT64}); + + // Create node tester + auto tester = exec::ExecNodeTester( + *plan_node, RowDescriptor({}), {input_rd}, exec_state_.get()); + + // Create test UUIDs + auto uuid1 = sole::uuid4(); + auto uuid2 = sole::uuid4(); + auto uuid3 = sole::uuid4(); + + absl::uint128 upid1 = absl::MakeUint128(uuid1.ab, uuid1.cd); + absl::uint128 upid2 = absl::MakeUint128(uuid2.ab, uuid2.cd); + absl::uint128 upid3 = absl::MakeUint128(uuid3.ab, uuid3.cd); + + // Create test data with UINT128 values + auto rb1 = RowBatchBuilder(input_rd, 2, /*eow*/ false, /*eos*/ false) + .AddColumn({1000000000000000000LL, 2000000000000000000LL}) + .AddColumn({upid1, upid2}) + .AddColumn({"host1", "host2"}) + .AddColumn({100, 200}) + .get(); + + auto rb2 = RowBatchBuilder(input_rd, 1, /*eow*/ true, 
/*eos*/ true) + .AddColumn({3000000000000000000LL}) + .AddColumn({upid3}) + .AddColumn({"host3"}) + .AddColumn({300}) + .get(); + + // Send data to sink + tester.ConsumeNext(rb1, 0, 0); + tester.ConsumeNext(rb2, 0, 0); + tester.Close(); + + // Verify data was inserted and UINT128 values were converted to UUID strings + auto results = QueryTable(absl::Substitute("SELECT upid, hostname, value FROM $0 ORDER BY time_", table_name)); + + ASSERT_EQ(results.size(), 3); + + // Check that UINT128 values were converted to valid UUID strings + EXPECT_EQ(results[0][0], uuid1.str()); + EXPECT_EQ(results[0][1], "host1"); + EXPECT_EQ(results[0][2], "100"); + + EXPECT_EQ(results[1][0], uuid2.str()); + EXPECT_EQ(results[1][1], "host2"); + EXPECT_EQ(results[1][2], "200"); + + EXPECT_EQ(results[2][0], uuid3.str()); + EXPECT_EQ(results[2][1], "host3"); + EXPECT_EQ(results[2][2], "300"); +} + } // namespace exec } // namespace carnot } // namespace px diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir.cc b/src/carnot/planner/ir/clickhouse_export_sink_ir.cc index f3a10ea9556..3137cbc2c7a 100644 --- a/src/carnot/planner/ir/clickhouse_export_sink_ir.cc +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir.cc @@ -19,6 +19,7 @@ #include "src/carnot/planner/ir/clickhouse_export_sink_ir.h" #include "src/carnot/planner/ir/ir.h" #include "src/carnot/planpb/plan.pb.h" +#include namespace px { namespace carnot { @@ -29,7 +30,44 @@ ClickHouseExportSinkIR::RequiredInputColumns() const { return std::vector>{required_column_names_}; } +Status ClickHouseExportSinkIR::Init(OperatorIR* parent, const std::string& table_name, + const std::string& clickhouse_dsn) { + table_name_ = table_name; + + // Parse the ClickHouse DSN and initialize the config + PX_ASSIGN_OR_RETURN(auto config, ParseClickHouseDSN(clickhouse_dsn)); + clickhouse_config_ = std::make_unique(config); + + return AddParent(parent); +} + +StatusOr ClickHouseExportSinkIR::ParseClickHouseDSN(const std::string& dsn) { + // Expected 
format: [clickhouse://]username:password@host:port/database + // The clickhouse:// prefix is optional + std::regex dsn_regex(R"((?:clickhouse://)?([^:]+):([^@]+)@([^:]+):(\d+)/(.+))"); + std::smatch matches; + + if (!std::regex_match(dsn, matches, dsn_regex)) { + return error::InvalidArgument("Invalid ClickHouse DSN format. Expected: [clickhouse://]username:password@host:port/database"); + } + + planpb::ClickHouseConfig config; + + // Extract the components + config.set_username(matches[1].str()); + config.set_password(matches[2].str()); + config.set_host(matches[3].str()); + config.set_port(std::stoi(matches[4].str())); + config.set_database(matches[5].str()); + + // hostname will be set by the runtime + config.set_hostname(""); + + return config; +} + Status ClickHouseExportSinkIR::ToProto(planpb::Operator* op) const { + PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); op->set_op_type(planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR); auto clickhouse_op = op->mutable_clickhouse_sink_op(); @@ -43,17 +81,17 @@ Status ClickHouseExportSinkIR::ToProto(planpb::Operator* op) const { clickhouse_op->set_table_name(table_name_); // Map all input columns to ClickHouse columns - DCHECK_EQ(1U, parent_types().size()); - auto parent_table_type = std::static_pointer_cast(parent_types()[0]); + DCHECK(is_type_resolved()); + int64_t idx = 0; + for (const auto& [col_name, col_type] : *resolved_table_type()) { + DCHECK(col_type->IsValueType()); + auto value_type = std::static_pointer_cast(col_type); - for (const auto& [idx, col_name] : Enumerate(parent_table_type->ColumnNames())) { auto column_mapping = clickhouse_op->add_column_mappings(); column_mapping->set_input_column_index(idx); column_mapping->set_clickhouse_column_name(col_name); - - PX_ASSIGN_OR_RETURN(auto col_type, parent_table_type->GetColumnType(col_name)); - auto value_type = std::static_pointer_cast(col_type); column_mapping->set_column_type(value_type->data_type()); + idx++; } return Status::OK(); @@ -75,8 +113,8 @@ Status 
ClickHouseExportSinkIR::ResolveType(CompilerState* compiler_state) { auto parent_table_type = std::static_pointer_cast(parent_types()[0]); - // Store ClickHouse config from compiler state - if (compiler_state->clickhouse_config() != nullptr) { + // Store ClickHouse config from compiler state only if not already set by Init() + if (clickhouse_config_ == nullptr && compiler_state->clickhouse_config() != nullptr) { clickhouse_config_ = std::make_unique(*compiler_state->clickhouse_config()); } diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir.h b/src/carnot/planner/ir/clickhouse_export_sink_ir.h index 9864113832a..c6e65e16538 100644 --- a/src/carnot/planner/ir/clickhouse_export_sink_ir.h +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir.h @@ -37,14 +37,14 @@ namespace planner { * @brief The IR representation for the ClickHouseExportSink operator. * Represents a configuration to export a DataFrame to a ClickHouse database. */ -class ClickHouseExportSinkIR : public OperatorIR { +class ClickHouseExportSinkIR : public SinkOperatorIR { public: - explicit ClickHouseExportSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kClickHouseExportSink) {} + explicit ClickHouseExportSinkIR(int64_t id, std::string mutation_id) + : SinkOperatorIR(id, IRNodeType::kClickHouseExportSink, mutation_id) {} - Status Init(OperatorIR* parent, const std::string& table_name) { - table_name_ = table_name; - return AddParent(parent); - } + Status Init(OperatorIR* parent, const std::string& table_name, const std::string& clickhouse_dsn); + + StatusOr ParseClickHouseDSN(const std::string& dsn); Status ToProto(planpb::Operator* op) const override; diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc b/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc index 6b45dfa9820..f3f13ad329d 100644 --- a/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir_test.cc @@ -49,8 +49,9 @@ TEST_F(ClickHouseExportSinkTest, 
basic_export) { auto src = MakeMemSource("table"); EXPECT_OK(src->ResolveType(compiler_state_.get())); + std::string clickhouse_dsn = "default:test_password@localhost:9000/default"; ASSERT_OK_AND_ASSIGN(auto clickhouse_sink, - graph->CreateNode(src->ast(), src, "http_events")); + graph->CreateNode(src->ast(), src, "http_events", clickhouse_dsn)); clickhouse_sink->PullParentTypes(); EXPECT_OK(clickhouse_sink->UpdateOpAfterParentTypesResolved()); @@ -112,8 +113,9 @@ TEST_F(ClickHouseExportSinkTest, required_input_columns) { auto src = MakeMemSource("table"); EXPECT_OK(src->ResolveType(compiler_state_.get())); + std::string clickhouse_dsn = "default:test_password@localhost:9000/default"; ASSERT_OK_AND_ASSIGN(auto clickhouse_sink, - graph->CreateNode(src->ast(), src, "http_events")); + graph->CreateNode(src->ast(), src, "http_events", clickhouse_dsn)); clickhouse_sink->PullParentTypes(); EXPECT_OK(clickhouse_sink->UpdateOpAfterParentTypesResolved()); diff --git a/src/carnot/planner/ir/pattern_match.h b/src/carnot/planner/ir/pattern_match.h index f8c484f47b9..0eb386ddbc5 100644 --- a/src/carnot/planner/ir/pattern_match.h +++ b/src/carnot/planner/ir/pattern_match.h @@ -160,6 +160,10 @@ inline ClassMatch OTelExportSink() { return ClassMatch(); } +inline ClassMatch ClickHouseExportSink() { + return ClassMatch(); +} + inline ClassMatch EmptySource() { return ClassMatch(); } @@ -266,7 +270,7 @@ struct ResultSink : public ParentMatch { bool Match(const IRNode* node) const override { return ExternalGRPCSink().Match(node) || MemorySink().Match(node) || - OTelExportSink().Match(node); + OTelExportSink().Match(node) || ClickHouseExportSink().Match(node); } }; diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index 646f36f88d3..5bbacb0fdf1 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -1093,6 +1093,132 @@ TEST_F(LogicalPlannerTest, ClickHouseSourceNode) { 
EXPECT_TRUE(has_clickhouse_source); } +constexpr char kClickHouseExportQuery[] = R"pxl( +import px + +# Test ClickHouse export using endpoint config +df = px.DataFrame('http_events', start_time='-10m') +df = df[['time_', 'req_path', 'resp_status', 'resp_latency_ns']] +px.export(df, px.otel.ClickHouseRows(table='http_events')) +)pxl"; + +TEST_F(LogicalPlannerTest, ClickHouseExportWithEndpointConfig) { + auto planner = LogicalPlanner::Create(info_).ConsumeValueOrDie(); + + // Create a planner state with an OTel endpoint config containing ClickHouse DSN + auto state = testutils::CreateTwoPEMsOneKelvinPlannerState(testutils::kHttpEventsSchema); + + // Set up the endpoint config with ClickHouse DSN in the URL field + auto* endpoint_config = state.mutable_otel_endpoint_config(); + endpoint_config->set_url("clickhouse_user:clickhouse_pass@clickhouse.example.com:9000/pixie_db"); + endpoint_config->set_insecure(true); + endpoint_config->set_timeout(10); + + auto plan_or_s = planner->Plan(MakeQueryRequest(state, kClickHouseExportQuery)); + EXPECT_OK(plan_or_s); + auto plan = plan_or_s.ConsumeValueOrDie(); + EXPECT_OK(plan->ToProto()); + + // Verify the plan contains ClickHouse export sink operators with correct config + auto plan_pb = plan->ToProto().ConsumeValueOrDie(); + bool has_clickhouse_export = false; + + for (const auto& [address, agent_plan] : plan_pb.qb_address_to_plan()) { + for (const auto& planFragment : agent_plan.nodes()) { + for (const auto& planNode : planFragment.nodes()) { + if (planNode.op().op_type() == planpb::OperatorType::CLICKHOUSE_EXPORT_SINK_OPERATOR) { + const auto& clickhouse_sink_op = planNode.op().clickhouse_sink_op(); + + // Verify table name + EXPECT_EQ(clickhouse_sink_op.table_name(), "http_events"); + + // Verify the DSN was parsed correctly into ClickHouseConfig + const auto& config = clickhouse_sink_op.clickhouse_config(); + EXPECT_EQ(config.username(), "clickhouse_user"); + EXPECT_EQ(config.password(), "clickhouse_pass"); + 
EXPECT_EQ(config.host(), "clickhouse.example.com"); + EXPECT_EQ(config.port(), 9000); + EXPECT_EQ(config.database(), "pixie_db"); + + // Verify column mappings were created + EXPECT_GT(clickhouse_sink_op.column_mappings_size(), 0); + + has_clickhouse_export = true; + break; + } + } + if (has_clickhouse_export) break; + } + if (has_clickhouse_export) break; + } + + EXPECT_TRUE(has_clickhouse_export); +} + +constexpr char kClickHouseExportWithExplicitEndpointQuery[] = R"pxl( +import px + +# Test ClickHouse export with explicit endpoint config +df = px.DataFrame('http_events', start_time='-10m') +df = df[['time_', 'req_path', 'resp_status']] + +endpoint = px.otel.Endpoint( + url="explicit_user:explicit_pass@explicit-host:9001/explicit_db", + insecure=False, + timeout=20 +) + +px.export(df, px.otel.ClickHouseRows(table='custom_table', endpoint=endpoint)) +)pxl"; + +TEST_F(LogicalPlannerTest, ClickHouseExportWithExplicitEndpoint) { + auto planner = LogicalPlanner::Create(info_).ConsumeValueOrDie(); + + // Create a planner state with a default endpoint config + auto state = testutils::CreateTwoPEMsOneKelvinPlannerState(testutils::kHttpEventsSchema); + + // Set up a default endpoint config (should be overridden by explicit endpoint) + auto* endpoint_config = state.mutable_otel_endpoint_config(); + endpoint_config->set_url("default_user:default_pass@default-host:9000/default_db"); + + auto plan_or_s = planner->Plan(MakeQueryRequest(state, kClickHouseExportWithExplicitEndpointQuery)); + EXPECT_OK(plan_or_s); + auto plan = plan_or_s.ConsumeValueOrDie(); + EXPECT_OK(plan->ToProto()); + + // Verify the plan uses the explicit endpoint config, not the default + auto plan_pb = plan->ToProto().ConsumeValueOrDie(); + bool has_clickhouse_export = false; + + for (const auto& [address, agent_plan] : plan_pb.qb_address_to_plan()) { + for (const auto& planFragment : agent_plan.nodes()) { + for (const auto& planNode : planFragment.nodes()) { + if (planNode.op().op_type() == 
planpb::OperatorType::CLICKHOUSE_EXPORT_SINK_OPERATOR) { + const auto& clickhouse_sink_op = planNode.op().clickhouse_sink_op(); + + // Verify table name + EXPECT_EQ(clickhouse_sink_op.table_name(), "custom_table"); + + // Verify the explicit endpoint was used, not the default + const auto& config = clickhouse_sink_op.clickhouse_config(); + EXPECT_EQ(config.username(), "explicit_user"); + EXPECT_EQ(config.password(), "explicit_pass"); + EXPECT_EQ(config.host(), "explicit-host"); + EXPECT_EQ(config.port(), 9001); + EXPECT_EQ(config.database(), "explicit_db"); + + has_clickhouse_export = true; + break; + } + } + if (has_clickhouse_export) break; + } + if (has_clickhouse_export) break; + } + + EXPECT_TRUE(has_clickhouse_export); +} + } // namespace planner } // namespace carnot } // namespace px diff --git a/src/carnot/planner/objects/otel.cc b/src/carnot/planner/objects/otel.cc index 8f343800361..ee07a7f67b9 100644 --- a/src/carnot/planner/objects/otel.cc +++ b/src/carnot/planner/objects/otel.cc @@ -80,9 +80,10 @@ Status ExportToOTel(const OTelData& data, const pypa::AstPtr& ast, Dataframe* df return op->graph()->CreateNode(ast, op, data).status(); } -Status ExportToClickHouse(const std::string& table_name, const pypa::AstPtr& ast, Dataframe* df) { +Status ExportToClickHouse(const std::string& table_name, const std::string& clickhouse_dsn, + const pypa::AstPtr& ast, Dataframe* df) { auto op = df->op(); - return op->graph()->CreateNode(ast, op, table_name).status(); + return op->graph()->CreateNode(ast, op, table_name, clickhouse_dsn).status(); } StatusOr GetArgAsString(const pypa::AstPtr& ast, const ParsedArgs& args, @@ -120,13 +121,31 @@ StatusOr> ClickHouseRows::Create( return std::shared_ptr(new ClickHouseRows(ast_visitor, table_name)); } -StatusOr ClickHouseRowsDefinition(const pypa::AstPtr& ast, const ParsedArgs& args, +StatusOr ClickHouseRowsDefinition(CompilerState* compiler_state, + const pypa::AstPtr& ast, const ParsedArgs& args, ASTVisitor* visitor) { 
PX_ASSIGN_OR_RETURN(StringIR* table_name_ir, GetArgAs(ast, args, "table")); std::string table_name = table_name_ir->str(); - return Exporter::Create(visitor, [table_name](auto&& ast_arg, auto&& df) -> Status { - return ExportToClickHouse(table_name, std::forward(ast_arg), + // Parse endpoint config to get the ClickHouse DSN from the URL field + std::string clickhouse_dsn; + QLObjectPtr endpoint = args.GetArg("endpoint"); + if (NoneObject::IsNoneObject(endpoint)) { + if (!compiler_state->endpoint_config()) { + return endpoint->CreateError("no default config found for endpoint, please specify one"); + } + clickhouse_dsn = compiler_state->endpoint_config()->url(); + } else { + if (endpoint->type() != EndpointConfig::EndpointType.type()) { + return endpoint->CreateError("expected Endpoint type for 'endpoint' arg, received $0", + endpoint->name()); + } + auto endpoint_config = static_cast(endpoint.get()); + clickhouse_dsn = endpoint_config->url(); + } + + return Exporter::Create(visitor, [table_name, clickhouse_dsn](auto&& ast_arg, auto&& df) -> Status { + return ExportToClickHouse(table_name, clickhouse_dsn, std::forward(ast_arg), std::forward(df)); }); } @@ -376,10 +395,10 @@ Status OTelModule::Init(CompilerState* compiler_state, IR* ir) { PX_ASSIGN_OR_RETURN( std::shared_ptr clickhouse_rows_fn, - FuncObject::Create(kClickHouseRowsOpID, {"table"}, {}, + FuncObject::Create(kClickHouseRowsOpID, {"table", "endpoint"}, {{"endpoint", "None"}}, /* has_variable_len_args */ false, /* has_variable_len_kwargs */ false, - std::bind(&ClickHouseRowsDefinition, std::placeholders::_1, + std::bind(&ClickHouseRowsDefinition, compiler_state, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3), ast_visitor())); AddMethod(kClickHouseRowsOpID, clickhouse_rows_fn); diff --git a/src/carnot/planner/objects/otel.h b/src/carnot/planner/objects/otel.h index 5d7637ce18c..2e218ee46fd 100644 --- a/src/carnot/planner/objects/otel.h +++ b/src/carnot/planner/objects/otel.h @@ -289,6 
+289,8 @@ class EndpointConfig : public QLObject { Status ToProto(planpb::OTelEndpointConfig* endpoint_config); + const std::string& url() const { return url_; } + protected: EndpointConfig(ASTVisitor* ast_visitor, std::string url, std::vector attributes, bool insecure, From b326816d616fa88820cd4319b723c9d01a9a747d Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 9 Nov 2025 19:09:56 +0000 Subject: [PATCH 79/86] Use k8sstormceneter plugin repo Signed-off-by: Dom Del Nano --- k8s/cloud/dev/plugin_db_updater_job.yaml | 2 +- k8s/cloud/overlays/plugin_job/plugin_job.yaml | 2 +- .../operator/opensearch_operator.yaml | 8850 +++++++++++++++++ 3 files changed, 8852 insertions(+), 2 deletions(-) create mode 100644 k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml diff --git a/k8s/cloud/dev/plugin_db_updater_job.yaml b/k8s/cloud/dev/plugin_db_updater_job.yaml index d92d7d544f5..769e5f6bd55 100644 --- a/k8s/cloud/dev/plugin_db_updater_job.yaml +++ b/k8s/cloud/dev/plugin_db_updater_job.yaml @@ -62,7 +62,7 @@ spec: name: pl-service-config key: PL_PLUGIN_SERVICE - name: PL_PLUGIN_REPO - value: "pixie-io/pixie-plugin" + value: "k8sstormcenter/pixie-plugin" - name: PL_GH_API_KEY valueFrom: secretKeyRef: diff --git a/k8s/cloud/overlays/plugin_job/plugin_job.yaml b/k8s/cloud/overlays/plugin_job/plugin_job.yaml index 228efbda87d..ab51bd9db20 100644 --- a/k8s/cloud/overlays/plugin_job/plugin_job.yaml +++ b/k8s/cloud/overlays/plugin_job/plugin_job.yaml @@ -55,7 +55,7 @@ spec: name: pl-service-config key: PL_PLUGIN_SERVICE - name: PL_PLUGIN_REPO - value: "pixie-io/pixie-plugin" + value: "k8sstormcenter/pixie-plugin" # The alpine based image contains a shell and is needed for this command to work. 
# yamllint disable-line rule:line-length - image: gcr.io/cloud-sql-connectors/cloud-sql-proxy:2.11.3-alpine@sha256:4885fd3e6362ba22abff1804a7f5e75cec5fafbeb4e41be8b0059ecad94a16f1 diff --git a/k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml b/k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml new file mode 100644 index 00000000000..fa57525b2c6 --- /dev/null +++ b/k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml @@ -0,0 +1,8850 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: opensearch-operator-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchactiongroups.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchActionGroup + listKind: OpensearchActionGroupList + plural: opensearchactiongroups + shortNames: + - opensearchactiongroup + singular: opensearchactiongroup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchActionGroup is the Schema for the opensearchactiongroups + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpensearchActionGroupSpec defines the desired state of OpensearchActionGroup + properties: + allowedActions: + items: + type: string + type: array + description: + type: string + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: + type: string + required: + - allowedActions + - opensearchCluster + type: object + status: + description: OpensearchActionGroupStatus defines the observed state of + OpensearchActionGroup + properties: + existingActionGroup: + type: boolean + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchclusters.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpenSearchCluster + listKind: OpenSearchClusterList + plural: opensearchclusters + shortNames: + - os + - opensearch + singular: opensearchcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.health + name: health + type: string + - description: Available nodes + jsonPath: .status.availableNodes + name: nodes + type: integer + - description: Opensearch version + jsonPath: .status.version + name: version + type: string + - jsonPath: .status.phase + name: phase + type: string + - jsonPath: .metadata.creationTimestamp + name: age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Es is the Schema for the es API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterSpec defines the desired state of OpenSearchCluster + properties: + bootstrap: + properties: + additionalConfig: + additionalProperties: + type: string + description: Extra items to add to the opensearch.yml, defaults + to General.AdditionalConfig + type: object + affinity: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + jvm: + type: string + keystore: + items: + properties: + keyMappings: + additionalProperties: + type: string + description: Key mappings from secret to keystore keys + type: object + secret: + description: Secret containing key value pairs + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: array + nodeSelector: + additionalProperties: + type: string + type: object + pluginsList: + items: + type: string + type: array + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + confMgmt: + description: ConfMgmt defines which additional services will be deployed + properties: + VerUpdate: + type: boolean + autoScaler: + type: boolean + smartScaler: + type: boolean + type: object + dashboards: + properties: + additionalConfig: + additionalProperties: + type: string + description: Additional properties for opensearch_dashboards.yaml + type: object + additionalVolumes: + items: + properties: + configMap: + description: ConfigMap to use to populate the volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: CSI object to use to populate the volume + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. 
+ type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + emptyDir: + description: EmptyDir to use to populate the volume + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. 
+ The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + name: + description: Name to use for the volume. Required. + type: string + path: + description: Path in the container to mount the volume at. + Required. + type: string + projected: + description: Projected object to use to populate the volume + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. 
+ + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
+ If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. 
Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + restartPods: + description: Whether to restart the pods on content change + type: boolean + secret: + description: Secret to use populate the volume + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + subPath: + description: SubPath of the referenced volume to mount. + type: string + required: + - name + - path + type: object + type: array + affinity: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
+ properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. 
due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + basePath: + description: Base Path for Opensearch Clusters running behind + a reverse proxy + type: string + enable: + type: boolean + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. 
+ properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + imagePullSecrets: + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + labels: + additionalProperties: + type: string + type: object + nodeSelector: + additionalProperties: + type: string + type: object + opensearchCredentialsSecret: + description: Secret that contains fields username and password + for dashboards to use to login to opensearch, must only be supplied + if a custom securityconfig is provided + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + pluginsList: + items: + type: string + type: array + podSecurityContext: + description: Set security context for the dashboards pods + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. 
+ Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. 
+ Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. + + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. 
+ Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + replicas: + format: int32 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + securityContext: + description: Set security context for the dashboards pods' container + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. 
+ AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. 
+ If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + service: + properties: + labels: + additionalProperties: + type: string + type: object + loadBalancerSourceRanges: + items: + type: string + type: array + type: + default: ClusterIP + description: Service Type string describes ingress methods + for a service + enum: + - ClusterIP + - NodePort + - LoadBalancer + type: string + type: object + tls: + properties: + caSecret: + description: Optional, secret that contains the ca certificate + as ca.crt. If this and generate=true is set the existing + CA cert from that secret is used to generate the node certs. 
+ In this case must contain ca.crt and ca.key fields + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + enable: + description: Enable HTTPS for Dashboards + type: boolean + generate: + description: Generate certificate, if false secret must be + provided + type: boolean + secret: + description: Optional, name of a TLS secret that contains + ca.crt, tls.key and tls.crt data. If ca.crt is in a different + secret provide it via the caSecret field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + tolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + version: + type: string + required: + - replicas + - version + type: object + general: + description: |- + INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + properties: + additionalConfig: + additionalProperties: + type: string + description: Extra items to add to the opensearch.yml + type: object + additionalVolumes: + description: Additional volumes to mount to all pods in the cluster + items: + properties: + configMap: + description: ConfigMap to use to populate the volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: CSI object to use to populate the volume + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. 
+ type: object + required: + - driver + type: object + emptyDir: + description: EmptyDir to use to populate the volume + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + name: + description: Name to use for the volume. Required. + type: string + path: + description: Path in the container to mount the volume at. + Required. + type: string + projected: + description: Projected object to use to populate the volume + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: |- + sources is the list of volume projections. 
Each entry in this list + handles one source. + items: + description: |- + Projection that may be projected along with other supported volume types. + Exactly one of these fields must be set. + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. 
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ restartPods:
+ description: Whether to restart the pods on content change
+ type: boolean
+ secret:
+ description: Secret to use to populate the volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ optional:
+ description: optional field specify whether the Secret
+ or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ subPath:
+ description: SubPath of the referenced volume to mount.
+ type: string
+ required:
+ - name
+ - path
+ type: object
+ type: array
+ annotations:
+ additionalProperties:
+ type: string
+ description: Adds support for annotations in services
+ type: object
+ command:
+ type: string
+ defaultRepo:
+ type: string
+ drainDataNodes:
+ description: Drain data nodes controls whether to drain data nodes
+ on rolling restart operations
+ type: boolean
+ httpPort:
+ default: 9200
+ format: int32
+ type: integer
+ image:
+ type: string
+ imagePullPolicy:
+ description: PullPolicy describes a policy for if/when to pull
+ a container image
+ type: string
+ imagePullSecrets:
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + keystore: + description: Populate opensearch keystore before startup + items: + properties: + keyMappings: + additionalProperties: + type: string + description: Key mappings from secret to keystore keys + type: object + secret: + description: Secret containing key value pairs + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: array + monitoring: + properties: + enable: + type: boolean + labels: + additionalProperties: + type: string + type: object + monitoringUserSecret: + type: string + pluginUrl: + type: string + scrapeInterval: + type: string + tlsConfig: + properties: + insecureSkipVerify: + type: boolean + serverName: + type: string + type: object + type: object + pluginsList: + items: + type: string + type: array + podSecurityContext: + description: Set security context for the cluster pods + properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. 
+ Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. 
+ If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxChangePolicy: + description: |- + seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + Valid values are "MountOption" and "Recursive". + + "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + + "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + This requires all Pods that share the same volume to use the same SELinux label. + It is not possible to share the same volume among privileged and unprivileged Pods. + Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + CSIDriver instance. Other volumes are always re-labelled recursively. + "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + + If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + and "Recursive" for all other volumes. 
+ + This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + + All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. + Note that this field cannot be set when spec.os.name is windows. + type: string + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. 
+ RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in + addition to the container's primary GID and fsGroup (if specified). If + the SupplementalGroupsPolicy feature is enabled, the + supplementalGroupsPolicy field determines whether these are in addition + to or instead of any group memberships defined in the container image. + If unspecified, no additional groups are added, though group memberships + defined in the container image may still be used, depending on the + supplementalGroupsPolicy field. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + description: |- + Defines how supplemental groups of the first container processes are calculated. + Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + and the container runtime must implement support for this feature. + Note that this field cannot be set when spec.os.name is windows. + type: string + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: |- + The Windows specific settings applied to all containers. 
+ If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + securityContext: + description: Set security context for the cluster pods' container + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. 
+ type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default value is Default which uses the container runtime defaults for + readonly paths and masked paths. 
+ This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + type: string + serviceName: + type: string + setVMMaxMapCount: + type: boolean + snapshotRepositories: + items: + properties: + name: + type: string + settings: + additionalProperties: + type: string + type: object + type: + type: string + required: + - name + - type + type: object + type: array + vendor: + enum: + - Opensearch + - Op + - OP + - os + - opensearch + type: string + version: + type: string + required: + - serviceName + type: object + initHelper: + properties: + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to pull + a container image + type: string + imagePullSecrets: + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. 
+ properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: array + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + version: + type: string + type: object + nodePools: + items: + properties: + additionalConfig: + additionalProperties: + type: string + type: object + affinity: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
+ properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. 
+ This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range + 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. 
The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + component: + type: string + diskSize: + type: string + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. 
+ type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + jvm: + type: string + labels: + additionalProperties: + type: string + type: object + nodeSelector: + additionalProperties: + type: string + type: object + pdb: + properties: + enable: + type: boolean + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + minAvailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + persistence: + description: PersistencConfig defines options for data persistence + properties: + emptyDir: + description: |- + Represents an empty directory for a pod. 
+ Empty directory volumes support ownership management and SELinux relabeling. + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + hostPath: + description: |- + Represents a host path mapped into a pod. + Host path volumes do not support ownership management or SELinux relabeling. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + pvc: + properties: + accessModes: + items: + type: string + type: array + storageClass: + type: string + type: object + type: object + priorityClassName: + type: string + probes: + properties: + liveness: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + readiness: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + startup: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + type: object + replicas: + format: int32 + type: integer + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + roles: + items: + type: string + type: array + tolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. 
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. 
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
+ + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. 
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + required: + - component + - replicas + - roles + type: object + type: array + security: + description: Security defines options for managing the opensearch-security + plugin + properties: + config: + properties: + adminCredentialsSecret: + description: Secret that contains fields username and password + to be used by the operator to access the opensearch cluster + for node draining. Must be set if custom securityconfig + is provided. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ adminSecret:
+ description: TLS Secret that contains a client certificate
+ (tls.key, tls.crt, ca.crt) with admin rights in the opensearch
+ cluster. Must be set if transport certificates are provided
+ by user and not generated
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ securityConfigSecret:
+ description: Secret that contains the different yml files of
+ the opensearch-security config (config.yml, internal_users.yml,
+ ...)
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ updateJob:
+ description: Specific configs for the SecurityConfig update
+ job
+ properties:
+ resources:
+ description: ResourceRequirements describes the compute
+ resource requirements.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + type: object + tls: + description: Configure tls usage for transport and http interface + properties: + http: + properties: + caSecret: + description: Optional, secret that contains the ca certificate + as ca.crt. If this and generate=true is set the existing + CA cert from that secret is used to generate the node + certs. 
In this case must contain ca.crt and ca.key fields + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + generate: + description: If set to true the operator will generate + a CA and certificates for the cluster to use, if false + secrets with existing certificates must be supplied + type: boolean + secret: + description: Optional, name of a TLS secret that contains + ca.crt, tls.key and tls.crt data. If ca.crt is in a + different secret provide it via the caSecret field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + transport: + properties: + adminDn: + description: DNs of certificates that should have admin + access, mainly used for securityconfig updates via securityadmin.sh, + only used when existing certificates are provided + items: + type: string + type: array + caSecret: + description: Optional, secret that contains the ca certificate + as ca.crt. If this and generate=true is set the existing + CA cert from that secret is used to generate the node + certs. In this case must contain ca.crt and ca.key fields + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + generate: + description: If set to true the operator will generate + a CA and certificates for the cluster to use, if false + secrets with existing certificates must be supplied + type: boolean + nodesDn: + description: Allowed Certificate DNs for nodes, only used + when existing certificates are provided + items: + type: string + type: array + perNode: + description: Configure transport node certificate + type: boolean + secret: + description: Optional, name of a TLS secret that contains + ca.crt, tls.key and tls.crt data. If ca.crt is in a + different secret provide it via the caSecret field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: object + required: + - nodePools + type: object + status: + description: ClusterStatus defines the observed state of Es + properties: + availableNodes: + description: AvailableNodes is the number of available instances. + format: int32 + type: integer + componentsStatus: + items: + properties: + component: + type: string + conditions: + items: + type: string + type: array + description: + type: string + status: + type: string + type: object + type: array + health: + description: OpenSearchHealth is the health of the cluster as returned + by the health API. 
+ type: string + initialized: + type: boolean + phase: + description: |- + INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + Important: Run "make" to regenerate code after modifying this file + type: string + version: + type: string + required: + - componentsStatus + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchcomponenttemplates.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchComponentTemplate + listKind: OpensearchComponentTemplateList + plural: opensearchcomponenttemplates + shortNames: + - opensearchcomponenttemplate + singular: opensearchcomponenttemplate + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchComponentTemplate is the schema for the OpenSearch + component templates API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + _meta: + description: Optional user metadata about the component template + x-kubernetes-preserve-unknown-fields: true + allowAutoCreate: + description: If true, then indices can be automatically created using + this template + type: boolean + name: + description: The name of the component template. Defaults to metadata.name + type: string + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + template: + description: The template that should be applied + properties: + aliases: + additionalProperties: + description: Describes the specs of an index alias + properties: + alias: + description: The name of the alias. + type: string + filter: + description: Query used to limit documents the alias can + access. + x-kubernetes-preserve-unknown-fields: true + index: + description: The name of the index that the alias points + to. + type: string + isWriteIndex: + description: If true, the index is the write index for the + alias + type: boolean + routing: + description: Value used to route indexing and search operations + to a specific shard. 
+ type: string + type: object + description: Aliases to add + type: object + mappings: + description: Mapping for fields in the index + x-kubernetes-preserve-unknown-fields: true + settings: + description: Configuration options for the index + x-kubernetes-preserve-unknown-fields: true + type: object + version: + description: Version number used to manage the component template + externally + type: integer + required: + - opensearchCluster + - template + type: object + status: + properties: + componentTemplateName: + description: Name of the currently managed component template + type: string + existingComponentTemplate: + type: boolean + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchindextemplates.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchIndexTemplate + listKind: OpensearchIndexTemplateList + plural: opensearchindextemplates + shortNames: + - opensearchindextemplate + singular: opensearchindextemplate + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchIndexTemplate is the schema for the OpenSearch index + templates API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + _meta: + description: Optional user metadata about the index template + x-kubernetes-preserve-unknown-fields: true + composedOf: + description: |- + An ordered list of component template names. Component templates are merged in the order specified, + meaning that the last component template specified has the highest precedence + items: + type: string + type: array + dataStream: + description: The dataStream config that should be applied + properties: + timestamp_field: + description: TimestampField for dataStream + properties: + name: + description: Name of the field that are used for the DataStream + type: string + required: + - name + type: object + type: object + indexPatterns: + description: Array of wildcard expressions used to match the names + of indices during creation + items: + type: string + type: array + name: + description: The name of the index template. Defaults to metadata.name + type: string + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + priority: + description: |- + Priority to determine index template precedence when a new data stream or index is created. + The index template with the highest priority is chosen + type: integer + template: + description: The template that should be applied + properties: + aliases: + additionalProperties: + description: Describes the specs of an index alias + properties: + alias: + description: The name of the alias. + type: string + filter: + description: Query used to limit documents the alias can + access. + x-kubernetes-preserve-unknown-fields: true + index: + description: The name of the index that the alias points + to. + type: string + isWriteIndex: + description: If true, the index is the write index for the + alias + type: boolean + routing: + description: Value used to route indexing and search operations + to a specific shard. + type: string + type: object + description: Aliases to add + type: object + mappings: + description: Mapping for fields in the index + x-kubernetes-preserve-unknown-fields: true + settings: + description: Configuration options for the index + x-kubernetes-preserve-unknown-fields: true + type: object + version: + description: Version number used to manage the component template + externally + type: integer + required: + - indexPatterns + - opensearchCluster + type: object + status: + properties: + existingIndexTemplate: + type: boolean + indexTemplateName: + description: Name of the currently managed index template + type: string + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchismpolicies.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpenSearchISMPolicy + listKind: OpenSearchISMPolicyList + plural: opensearchismpolicies + shortNames: + - ismp + - ismpolicy + singular: opensearchismpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ISMPolicySpec is the specification for the ISM policy for + OS. + properties: + applyToExistingIndices: + description: If true, apply the policy to existing indices that match + the index patterns in the ISM template. + type: boolean + defaultState: + description: The default starting state for each index that uses this + policy. + type: string + description: + description: A human-readable description of the policy. + type: string + errorNotification: + properties: + channel: + type: string + destination: + description: The destination URL. 
+ properties:
+ amazon:
+ properties:
+ url:
+ type: string
+ type: object
+ chime:
+ properties:
+ url:
+ type: string
+ type: object
+ customWebhook:
+ properties:
+ url:
+ type: string
+ type: object
+ slack:
+ properties:
+ url:
+ type: string
+ type: object
+ type: object
+ messageTemplate:
+ description: The text of the message
+ properties:
+ source:
+ type: string
+ type: object
+ type: object
+ ismTemplate:
+ description: Specify an ISM template pattern that matches the index
+ to apply the policy.
+ properties:
+ indexPatterns:
+ description: Index patterns on which this policy has to be applied
+ items:
+ type: string
+ type: array
+ priority:
+ description: Priority of the template, defaults to 0
+ type: integer
+ required:
+ - indexPatterns
+ type: object
+ opensearchCluster:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ default: ""
+ description: |-
+ Name of the referent.
+ This field is effectively required, but due to backwards compatibility is
+ allowed to be empty. Instances of this type with an empty value here are
+ almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ policyId:
+ type: string
+ states:
+ description: The states that you define in the policy.
+ items:
+ properties:
+ actions:
+ description: The actions to execute after entering a state.
+ items:
+ description: Actions are the steps that the policy sequentially
+ executes on entering a specific state.
+ properties:
+ alias:
+ properties:
+ actions:
+ description: The list of alias actions (add or
+ remove) to perform.
+ items:
+ properties:
+ add:
+ properties:
+ aliases:
+ description: The name of the alias.
+ items:
+ type: string
+ type: array
+ index:
+ description: The name of the index that
+ the alias points to.
+ type: string
+ isWriteIndex:
+ description: Specify the index that accepts
+ any write operations to the alias.
+ type: boolean
+ routing:
+ description: Limit search to an associated
+ shard value
+ type: string
+ type: object
+ remove:
+ properties:
+ aliases:
+ description: The name of the alias.
+ items:
+ type: string
+ type: array
+ index:
+ description: The name of the index that
+ the alias points to.
+ type: string
+ isWriteIndex:
+ description: Specify the index that accepts
+ any write operations to the alias.
+ type: boolean
+ routing:
+ description: Limit search to an associated
+ shard value
+ type: string
+ type: object
+ type: object
+ type: array
+ required:
+ - actions
+ type: object
+ allocation:
+ description: Allocate the index to a node with a specific
+ attribute set
+ properties:
+ exclude:
+ description: Don’t allocate the index to a node with
+ any of the specified attributes.
+ type: string
+ include:
+ description: Allocate the index to a node with any
+ of the specified attributes.
+ type: string
+ require:
+ description: Allocate the index to a node with a specified
+ attribute.
+ type: string
+ waitFor:
+ description: Wait for the policy to execute before
+ allocating the index to a node with a specified
+ attribute.
+ type: string
+ required:
+ - exclude
+ - include
+ - require
+ - waitFor
+ type: object
+ close:
+ description: Closes the managed index.
+ type: object
+ delete:
+ description: Deletes a managed index.
+ type: object
+ forceMerge:
+ description: Reduces the number of Lucene segments by
+ merging the segments of individual shards.
+ properties:
+ maxNumSegments:
+ description: The number of segments to reduce the
+ shard to.
+ format: int64
+ type: integer
+ required:
+ - maxNumSegments
+ type: object
+ indexPriority:
+ description: Set the priority for the index in a specific
+ state.
+ properties:
+ priority:
+ description: The priority for the index as soon as
+ it enters a state.
+ format: int64 + type: integer + required: + - priority + type: object + notification: + description: Name string `json:"name,omitempty"` + properties: + destination: + type: string + messageTemplate: + properties: + source: + type: string + type: object + required: + - destination + - messageTemplate + type: object + open: + description: Opens a managed index. + type: object + readOnly: + description: Sets a managed index to be read only. + type: object + readWrite: + description: Sets a managed index to be writeable. + type: object + replicaCount: + description: Sets the number of replicas to assign to + an index. + properties: + numberOfReplicas: + format: int64 + type: integer + required: + - numberOfReplicas + type: object + retry: + description: The retry configuration for the action. + properties: + backoff: + description: The backoff policy type to use when retrying. + type: string + count: + description: The number of retry counts. + format: int64 + type: integer + delay: + description: The time to wait between retries. + type: string + required: + - count + type: object + rollover: + description: Rolls an alias over to a new index when the + managed index meets one of the rollover conditions. + properties: + minDocCount: + description: The minimum number of documents required + to roll over the index. + format: int64 + type: integer + minIndexAge: + description: The minimum age required to roll over + the index. + type: string + minPrimaryShardSize: + description: The minimum storage size of a single + primary shard required to roll over the index. + type: string + minSize: + description: The minimum size of the total primary + shard storage (not counting replicas) required to + roll over the index. + type: string + type: object + rollup: + description: Periodically reduce data granularity by rolling + up old data into summarized indexes. 
+ type: object + shrink: + description: Allows you to reduce the number of primary + shards in your indexes + properties: + forceUnsafe: + description: If true, executes the shrink action even + if there are no replicas. + type: boolean + maxShardSize: + description: The maximum size in bytes of a shard + for the target index. + type: string + numNewShards: + description: The maximum number of primary shards + in the shrunken index. + type: integer + percentageOfSourceShards: + description: Percentage of the number of original + primary shards to shrink. + format: int64 + type: integer + targetIndexNameTemplate: + description: The name of the shrunken index. + type: string + type: object + snapshot: + description: Back up your cluster’s indexes and state + properties: + repository: + description: The repository name that you register + through the native snapshot API operations. + type: string + snapshot: + description: The name of the snapshot. + type: string + required: + - repository + - snapshot + type: object + timeout: + description: The timeout period for the action. Accepts + time units for minutes, hours, and days. + type: string + type: object + type: array + name: + description: The name of the state. + type: string + transitions: + description: The next states and the conditions required to + transition to those states. If no transitions exist, the policy + assumes that it’s complete and can now stop managing the index + items: + properties: + conditions: + description: conditions for the transition. + properties: + cron: + description: The cron job that triggers the transition + if no other transition happens first. + properties: + cron: + description: A wrapper for the cron job that triggers + the transition if no other transition happens + first. This wrapper is here to adhere to the + OpenSearch API. + properties: + expression: + description: The cron expression that triggers + the transition. 
+ type: string + timezone: + description: The timezone that triggers the + transition. + type: string + required: + - expression + - timezone + type: object + required: + - cron + type: object + minDocCount: + description: The minimum document count of the index + required to transition. + format: int64 + type: integer + minIndexAge: + description: The minimum age of the index required + to transition. + type: string + minRolloverAge: + description: The minimum age required after a rollover + has occurred to transition to the next state. + type: string + minSize: + description: The minimum size of the total primary + shard storage (not counting replicas) required to + transition. + type: string + type: object + stateName: + description: The name of the state to transition to if + the conditions are met. + type: string + required: + - conditions + - stateName + type: object + type: array + required: + - actions + - name + type: object + type: array + required: + - defaultState + - description + - states + type: object + status: + description: OpensearchISMPolicyStatus defines the observed state of OpensearchISMPolicy + properties: + existingISMPolicy: + type: boolean + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + policyId: + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchroles.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchRole + listKind: OpensearchRoleList + plural: opensearchroles + shortNames: + - opensearchrole + singular: opensearchrole + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchRole is the Schema for the opensearchroles API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpensearchRoleSpec defines the desired state of OpensearchRole + properties: + clusterPermissions: + items: + type: string + type: array + indexPermissions: + items: + properties: + allowedActions: + items: + type: string + type: array + dls: + type: string + fls: + items: + type: string + type: array + indexPatterns: + items: + type: string + type: array + maskedFields: + items: + type: string + type: array + type: object + type: array + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + tenantPermissions: + items: + properties: + allowedActions: + items: + type: string + type: array + tenantPatterns: + items: + type: string + type: array + type: object + type: array + required: + - opensearchCluster + type: object + status: + description: OpensearchRoleStatus defines the observed state of OpensearchRole + properties: + existingRole: + type: boolean + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchsnapshotpolicies.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchSnapshotPolicy + listKind: OpensearchSnapshotPolicyList + plural: opensearchsnapshotpolicies + singular: opensearchsnapshotpolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Existing policy state + jsonPath: .status.existingSnapshotPolicy + name: existingpolicy + type: boolean + - description: Snapshot policy name + jsonPath: .status.snapshotPolicyName + name: policyName + type: string + - jsonPath: .status.state + name: state + type: string + - jsonPath: .metadata.creationTimestamp + name: age + type: date + name: v1 + schema: + openAPIV3Schema: + description: OpensearchSnapshotPolicy is the Schema for the opensearchsnapshotpolicies + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + creation: + properties: + schedule: + properties: + cron: + properties: + expression: + type: string + timezone: + type: string + required: + - expression + - timezone + type: object + required: + - cron + type: object + timeLimit: + type: string + required: + - schedule + type: object + deletion: + properties: + deleteCondition: + properties: + maxAge: + type: string + maxCount: + type: integer + minCount: + type: integer + type: object + schedule: + properties: + cron: + properties: + expression: + type: string + timezone: + type: string + required: + - expression + - timezone + type: object + required: + - cron + type: object + timeLimit: + type: string + type: object + description: + type: string + enabled: + type: boolean + notification: + properties: + channel: + properties: + id: + type: string + required: + - id + type: object + conditions: + properties: + creation: + type: boolean + deletion: + type: boolean + failure: + type: boolean + type: object + required: + - channel + type: object + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + policyName: + type: string + snapshotConfig: + properties: + dateFormat: + type: string + dateFormatTimezone: + type: string + ignoreUnavailable: + type: boolean + includeGlobalState: + type: boolean + indices: + type: string + metadata: + additionalProperties: + type: string + type: object + partial: + type: boolean + repository: + type: string + required: + - repository + type: object + required: + - creation + - opensearchCluster + - policyName + - snapshotConfig + type: object + status: + description: OpensearchSnapshotPolicyStatus defines the observed state + of OpensearchSnapshotPolicy + properties: + existingSnapshotPolicy: + type: boolean + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + reason: + type: string + snapshotPolicyName: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchtenants.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchTenant + listKind: OpensearchTenantList + plural: opensearchtenants + shortNames: + - opensearchtenant + singular: opensearchtenant + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchTenant is the Schema for the opensearchtenants API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpensearchTenantSpec defines the desired state of OpensearchTenant + properties: + description: + type: string + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - opensearchCluster + type: object + status: + description: OpensearchTenantStatus defines the observed state of OpensearchTenant + properties: + existingTenant: + type: boolean + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. 
+ type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchuserrolebindings.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchUserRoleBinding + listKind: OpensearchUserRoleBindingList + plural: opensearchuserrolebindings + shortNames: + - opensearchuserrolebinding + singular: opensearchuserrolebinding + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchUserRoleBinding is the Schema for the opensearchuserrolebindings + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpensearchUserRoleBindingSpec defines the desired state of + OpensearchUserRoleBinding + properties: + backendRoles: + items: + type: string + type: array + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + roles: + items: + type: string + type: array + users: + items: + type: string + type: array + required: + - opensearchCluster + - roles + type: object + status: + description: OpensearchUserRoleBindingStatus defines the observed state + of OpensearchUserRoleBinding + properties: + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + provisionedBackendRoles: + items: + type: string + type: array + provisionedRoles: + items: + type: string + type: array + provisionedUsers: + items: + type: string + type: array + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.0 + name: opensearchusers.opensearch.opster.io +spec: + group: opensearch.opster.io + names: + kind: OpensearchUser + listKind: OpensearchUserList + plural: opensearchusers + shortNames: + - opensearchuser + singular: opensearchuser + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpensearchUser is the Schema for the opensearchusers API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OpensearchUserSpec defines the desired state of OpensearchUser + properties: + attributes: + additionalProperties: + type: string + type: object + backendRoles: + items: + type: string + type: array + opendistroSecurityRoles: + items: + type: string + type: array + opensearchCluster: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + passwordFrom: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - opensearchCluster + - passwordFrom + type: object + status: + description: OpensearchUserStatus defines the observed state of OpensearchUser + properties: + managedCluster: + description: |- + UID is a type that holds unique ID values, including UUIDs. Because we + don't ONLY use UUIDs, this is an alias to string. Being a type captures + intent and helps make sure that UIDs and names do not get conflated. + type: string + reason: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + name: servicemonitors.monitoring.coreos.com +spec: + conversion: + strategy: None + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: ServiceMonitor + listKind: ServiceMonitorList + plural: servicemonitors + singular: servicemonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceMonitor defines monitoring for a set of services. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Service selection for target discovery + by Prometheus. + properties: + endpoints: + description: A list of endpoints allowed as part of this ServiceMonitor. + items: + description: Endpoint defines a scrapeable endpoint serving Prometheus + metrics. + properties: + authorization: + description: Authorization section for this endpoint + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over + basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' + properties: + password: + description: The secret in the service monitor namespace + that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace + that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenFile: + description: File to read bearer token for scraping targets. + type: string + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping + targets. The secret needs to be in the same namespace as the + service monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + followRedirects: + description: FollowRedirects configures whether scrape requests + follow HTTP 3xx redirects. + type: boolean + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. 
+ type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before + ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + default: replace + description: Action to perform based on regex matching. + Default is 'replace' + enum: + - replace + - keep + - drop + - hashmod + - labelmap + - labeldrop + - labelkeep + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + description: LabelName is a valid Prometheus label name + which may only contain ASCII letters, numbers, as + well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + oauth2: + description: OAuth2 for the URL. 
Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: The secret or configmap containing the OAuth2 + client id + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + clientSecret: + description: The secret containing the OAuth2 client secret + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + endpointParams: + additionalProperties: + type: string + description: Parameters to append to the token URL + type: object + scopes: + description: OAuth2 scopes used for the token request + items: + type: string + type: array + tokenUrl: + description: The URL to fetch the token from + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the service port this endpoint refers to. + Mutually exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before scraping. + Prometheus Operator automatically adds relabelings for a few + standard Kubernetes fields. The original scrape job''s name + is available via the `__tmp_prometheus_job_name` label. More + info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + default: replace + description: Action to perform based on regex matching. + Default is 'replace' + enum: + - replace + - keep + - drop + - hashmod + - labelmap + - labeldrop + - labelkeep + type: string + modulus: + description: Modulus to take of the hash of the source + label values. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + description: LabelName is a valid Prometheus label name + which may only contain ASCII letters, numbers, as + well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: Name or number of the target port of the Pod behind + the Service, the port must be specified with container port + property. Mutually exclusive with port. + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint + properties: + ca: + description: Struct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + type: array + jobLabel: + description: "Chooses the label of the Kubernetes `Endpoints`. Its + value will be used for the `job`-label's value of the created metrics. + \n Default & fallback value: the name of the respective Kubernetes + `Endpoint`." + type: string + labelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. 
+ format: int64 + type: integer + namespaceSelector: + description: Selector to select which namespaces the Kubernetes Endpoints + objects are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names to select from. + items: + type: string + type: array + type: object + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes `Pod` + onto the created metrics. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Endpoints objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + targetLabels: + description: TargetLabels transfers labels from the Kubernetes `Service` + onto the created metrics. + items: + type: string + type: array + targetLimit: + description: TargetLimit defines a limit on the number of scraped + targets that will be accepted. + format: int64 + type: integer + required: + - endpoints + - selector + type: object + required: + - spec + type: object + served: true + storage: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: opensearch-operator-controller-manager + namespace: opensearch-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: opensearch-operator-leader-election-role + namespace: opensearch-operator-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: opensearch-operator-manager-role +rules: +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - namespaces + - persistentvolumeclaims + - pods + - secrets + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + 
- patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - opensearch.opster.io + resources: + - events + verbs: + - create + - patch +- apiGroups: + - opensearch.opster.io + resources: + - opensearchactiongroups + - opensearchclusters + - opensearchcomponenttemplates + - opensearchindextemplates + - opensearchismpolicies + - opensearchroles + - opensearchsnapshotpolicies + - opensearchtenants + - opensearchuserrolebindings + - opensearchusers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - opensearch.opster.io + resources: + - opensearchactiongroups/finalizers + - opensearchclusters/finalizers + - opensearchcomponenttemplates/finalizers + - opensearchindextemplates/finalizers + - opensearchismpolicies/finalizers + - opensearchroles/finalizers + - opensearchsnapshotpolicies/finalizers + - opensearchtenants/finalizers + - opensearchuserrolebindings/finalizers + - opensearchusers/finalizers + verbs: + - update +- apiGroups: + - opensearch.opster.io + resources: + - opensearchactiongroups/status + - opensearchclusters/status + - opensearchcomponenttemplates/status + - opensearchindextemplates/status + - opensearchismpolicies/status + - opensearchroles/status + - opensearchsnapshotpolicies/status + - opensearchtenants/status + - opensearchuserrolebindings/status + - opensearchusers/status + verbs: + - get + - patch + - update +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: opensearch-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: opensearch-operator-proxy-role +rules: +- apiGroups: + - 
authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: opensearch-operator-leader-election-rolebinding + namespace: opensearch-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: opensearch-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: opensearch-operator-controller-manager + namespace: opensearch-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: opensearch-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: opensearch-operator-manager-role +subjects: +- kind: ServiceAccount + name: opensearch-operator-controller-manager + namespace: opensearch-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: opensearch-operator-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: opensearch-operator-proxy-role +subjects: +- kind: ServiceAccount + name: opensearch-operator-controller-manager + namespace: opensearch-operator-system +--- +apiVersion: v1 +data: + controller_manager_config.yaml: | + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + kind: ControllerManagerConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 + leaderElection: + leaderElect: true + resourceName: a867c7dc.opensearch.opster.io +kind: ConfigMap +metadata: + name: opensearch-operator-manager-config + namespace: opensearch-operator-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: opensearch-operator-controller-manager-metrics-service + namespace: opensearch-operator-system +spec: + ports: + - name: https + port: 8443 + 
targetPort: https + selector: + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + control-plane: controller-manager + name: opensearch-operator-controller-manager + namespace: opensearch-operator-system +spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=10 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + command: + - /manager + image: controller:latest + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi + securityContext: + allowPrivilegeEscalation: false + securityContext: + runAsNonRoot: true + serviceAccountName: opensearch-operator-controller-manager + terminationGracePeriodSeconds: 10 From b2a7190c63e161fb8fb2295939b830a794d0dbf8 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 9 Nov 2025 22:04:28 +0000 Subject: [PATCH 80/86] Add adaptive export vizier service code. 
Verify it works adhoc Signed-off-by: Dom Del Nano --- .../services/adaptive_export/BUILD.bazel | 46 +++ .../services/adaptive_export/cmd/main.go | 156 +++++++ .../internal/config/BUILD.bazel | 35 ++ .../adaptive_export/internal/config/config.go | 390 ++++++++++++++++++ .../internal/config/definition.go | 66 +++ .../internal/pixie/BUILD.bazel | 34 ++ .../adaptive_export/internal/pixie/pixie.go | 256 ++++++++++++ .../internal/script/BUILD.bazel | 24 ++ .../adaptive_export/internal/script/script.go | 114 +++++ 9 files changed, 1121 insertions(+) create mode 100644 src/vizier/services/adaptive_export/BUILD.bazel create mode 100644 src/vizier/services/adaptive_export/cmd/main.go create mode 100644 src/vizier/services/adaptive_export/internal/config/BUILD.bazel create mode 100644 src/vizier/services/adaptive_export/internal/config/config.go create mode 100644 src/vizier/services/adaptive_export/internal/config/definition.go create mode 100644 src/vizier/services/adaptive_export/internal/pixie/BUILD.bazel create mode 100644 src/vizier/services/adaptive_export/internal/pixie/pixie.go create mode 100644 src/vizier/services/adaptive_export/internal/script/BUILD.bazel create mode 100644 src/vizier/services/adaptive_export/internal/script/script.go diff --git a/src/vizier/services/adaptive_export/BUILD.bazel b/src/vizier/services/adaptive_export/BUILD.bazel new file mode 100644 index 00000000000..355a1ec2117 --- /dev/null +++ b/src/vizier/services/adaptive_export/BUILD.bazel @@ -0,0 +1,46 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("//bazel:pl_build_system.bzl", "pl_go_image") + +go_library( + name = "adaptive_export_lib", + srcs = ["cmd/main.go"], + importpath = "px.dev/pixie/src/vizier/services/adaptive_export", + visibility = ["//visibility:private"], + deps = [ + "//src/vizier/services/adaptive_export/internal/config", + "//src/vizier/services/adaptive_export/internal/pixie", + "//src/vizier/services/adaptive_export/internal/script", + "@com_github_sirupsen_logrus//:logrus", + ], +) + +go_binary( + name = "adaptive_export", + embed = [":adaptive_export_lib"], + visibility = ["//visibility:public"], +) + +pl_go_image( + name = "adaptive_export_image", + binary = ":adaptive_export", + visibility = [ + "//k8s:__subpackages__", + "//src/vizier:__subpackages__", + ], +) diff --git a/src/vizier/services/adaptive_export/cmd/main.go b/src/vizier/services/adaptive_export/cmd/main.go new file mode 100644 index 00000000000..bfd39aff56c --- /dev/null +++ b/src/vizier/services/adaptive_export/cmd/main.go @@ -0,0 +1,156 @@ +// Copyright 2018- The Pixie Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "fmt" + "os" + "time" + + log "github.com/sirupsen/logrus" + + "px.dev/pixie/src/vizier/services/adaptive_export/internal/config" + "px.dev/pixie/src/vizier/services/adaptive_export/internal/pixie" + "px.dev/pixie/src/vizier/services/adaptive_export/internal/script" +) + +const ( + defaultRetries = 100 + defaultSleepTime = 15 * time.Second +) + +func main() { + ctx := context.Background() + + log.Info("Starting the setup of the ClickHouse Pixie plugin") + cfg, err := config.GetConfig() + if err != nil { + log.Error(err) + os.Exit(1) + } + + clusterId := cfg.Pixie().ClusterID() + clusterName := cfg.Worker().ClusterName() + + log.Infof("Setting up Pixie plugin for cluster-id %s", clusterId) + client, err := setupPixie(ctx, cfg.Pixie(), defaultRetries, defaultSleepTime) + if err != nil { + log.WithError(err).Fatal("setting up Pixie client failed") + } + + log.Info("Checking the current ClickHouse plugin configuration") + plugin, err := client.GetClickHousePlugin() + if err != nil { + log.WithError(err).Fatal("getting data retention plugins failed") + } + + enablePlugin := true + if plugin.RetentionEnabled { + enablePlugin = false + config, err := client.GetClickHousePluginConfig() + if err != nil { + log.WithError(err).Fatal("getting ClickHouse plugin config failed") + } + if config.ExportUrl != cfg.ClickHouse().DSN() { + log.Info("ClickHouse plugin is configured with different DSN... 
Overwriting") + enablePlugin = true + } + } + + if enablePlugin { + log.Info("Enabling ClickHouse plugin") + err := client.EnableClickHousePlugin(&pixie.ClickHousePluginConfig{ + ExportUrl: cfg.ClickHouse().DSN(), + }, plugin.LatestVersion) + if err != nil { + log.WithError(err).Fatal("failed to enabled ClickHouse plugin") + } + } + + log.Info("Setting up the data retention scripts") + + log.Info("Getting preset script from the Pixie plugin") + defsFromPixie, err := client.GetPresetScripts() + if err != nil { + log.WithError(err).Fatal("failed to get preset scripts") + } + + definitions := defsFromPixie + + log.Infof("Getting current scripts for cluster") + currentScripts, err := client.GetClusterScripts(clusterId, clusterName) + if err != nil { + log.WithError(err).Fatal("failed to get data retention scripts") + } + + actions := script.GetActions(definitions, currentScripts, script.ScriptConfig{ + ClusterName: clusterName, + ClusterId: clusterId, + CollectInterval: cfg.Worker().CollectInterval(), + }) + + var errs []error + + for _, s := range actions.ToDelete { + log.Infof("Deleting script %s", s.Name) + err := client.DeleteDataRetentionScript(s.ScriptId) + if err != nil { + errs = append(errs, err) + } + } + + for _, s := range actions.ToUpdate { + log.Infof("Updating script %s", s.Name) + err := client.UpdateDataRetentionScript(clusterId, s.ScriptId, s.Name, s.Description, s.FrequencyS, s.Script) + if err != nil { + errs = append(errs, err) + } + } + + for _, s := range actions.ToCreate { + log.Infof("Creating script %s", s.Name) + err := client.AddDataRetentionScript(clusterId, s.Name, s.Description, s.FrequencyS, s.Script) + if err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + log.Fatalf("errors while setting up data retention scripts: %v", errs) + } + + log.Info("All done! 
The ClickHouse plugin is now configured.") + os.Exit(0) +} + +func setupPixie(ctx context.Context, cfg config.Pixie, tries int, sleepTime time.Duration) (*pixie.Client, error) { + apiKey := cfg.APIKey() + host := cfg.Host() + log.Infof("setupPixie: API Key length=%d, Host=%s", len(apiKey), host) + + for tries > 0 { + client, err := pixie.NewClient(ctx, apiKey, host) + if err == nil { + return client, nil + } + tries -= 1 + log.WithError(err).Warning("error creating Pixie API client") + time.Sleep(sleepTime) + } + return nil, fmt.Errorf("exceeded maximum number of retries") +} diff --git a/src/vizier/services/adaptive_export/internal/config/BUILD.bazel b/src/vizier/services/adaptive_export/internal/config/BUILD.bazel new file mode 100644 index 00000000000..413451fc77b --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/config/BUILD.bazel @@ -0,0 +1,35 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "config", + srcs = [ + "config.go", + "definition.go", + ], + importpath = "px.dev/pixie/src/vizier/services/adaptive_export/internal/config", + visibility = ["//src/vizier/services/adaptive_export:__subpackages__"], + deps = [ + "//src/utils/shared/k8s", + "//src/vizier/services/adaptive_export/internal/script", + "@com_github_sirupsen_logrus//:logrus", + "@in_gopkg_yaml_v2//:yaml_v2", + "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", + "@io_k8s_client_go//kubernetes", + ], +) diff --git a/src/vizier/services/adaptive_export/internal/config/config.go b/src/vizier/services/adaptive_export/internal/config/config.go new file mode 100644 index 00000000000..9542a01437e --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/config/config.go @@ -0,0 +1,390 @@ +// Copyright 2018- The Pixie Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "sync" + + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "px.dev/pixie/src/utils/shared/k8s" +) + +const ( + envVerbose = "VERBOSE" + envClickHouseDSN = "CLICKHOUSE_DSN" + envPixieClusterID = "PIXIE_CLUSTER_ID" + envPixieEndpoint = "PIXIE_ENDPOINT" + envPixieAPIKey = "PIXIE_API_KEY" + envClusterName = "CLUSTER_NAME" + envCollectInterval = "COLLECT_INTERVAL_SEC" + defPixieHostname = "work.withpixie.ai:443" + boolTrue = "true" + defCollectInterval = 30 +) + +var ( + integrationVersion = "0.0.0" + gitCommit = "" + buildDate = "" + once sync.Once + instance Config +) + +// findVizierNamespace looks for the namespace that the vizier is running in. +func findVizierNamespace(clientset *kubernetes.Clientset) (string, error) { + vzPods, err := clientset.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{ + LabelSelector: "component=vizier", + }) + if err != nil { + return "", err + } + + if len(vzPods.Items) == 0 { + return "", fmt.Errorf("no vizier pods found") + } + + return vzPods.Items[0].Namespace, nil +} + +// getK8sConfig attempts to read configuration from Kubernetes secrets and configmaps. +// Returns (clusterID, apiKey, clusterName, host, error). 
+func getK8sConfig() (string, string, string, string, error) { + config := k8s.GetConfig() + if config == nil { + return "", "", "", "", fmt.Errorf("unable to get kubernetes config") + } + + clientset := k8s.GetClientset(config) + if clientset == nil { + return "", "", "", "", fmt.Errorf("unable to get kubernetes clientset") + } + + vzNs, err := findVizierNamespace(clientset) + if err != nil || vzNs == "" { + return "", "", "", "", fmt.Errorf("unable to find vizier namespace: %w", err) + } + + // Get cluster-id and cluster-name from pl-cluster-secrets + clusterSecrets := k8s.GetSecret(clientset, vzNs, "pl-cluster-secrets") + if clusterSecrets == nil { + return "", "", "", "", fmt.Errorf("unable to get pl-cluster-secrets") + } + + clusterID := "" + if cID, ok := clusterSecrets.Data["cluster-id"]; ok { + clusterID = string(cID) + } + + clusterName := "" + if cn, ok := clusterSecrets.Data["cluster-name"]; ok { + clusterName = string(cn) + } + + // Note: pl-deploy-secrets contains the deployment key (for registering vizier), + // not the user API key (for accessing cloud APIs). The user API key must be + // provided via PIXIE_API_KEY environment variable. 
+ apiKey := "" + + // Get PL_CLOUD_ADDR from pl-cloud-config + cloudConfig, err := clientset.CoreV1().ConfigMaps(vzNs).Get(context.Background(), "pl-cloud-config", metav1.GetOptions{}) + host := "" + if err == nil { + if addr, ok := cloudConfig.Data["PL_CLOUD_ADDR"]; ok { + host = addr + } + } + + return clusterID, apiKey, clusterName, host, nil +} + +func GetConfig() (Config, error) { + var err error + once.Do(func() { + err = setUpConfig() + }) + return instance, err +} + +func setUpConfig() error { + log.SetLevel(log.InfoLevel) + if strings.EqualFold(os.Getenv(envVerbose), boolTrue) { + log.SetLevel(log.DebugLevel) + } + + // Try to read configuration from environment variables first + clickhouseDSN := os.Getenv(envClickHouseDSN) + pixieClusterID := os.Getenv(envPixieClusterID) + pixieAPIKey := os.Getenv(envPixieAPIKey) + clusterName := os.Getenv(envClusterName) + pixieHost := getEnvWithDefault(envPixieEndpoint, defPixieHostname) + + log.Debugf("Config from environment - ClickHouse DSN: %s", clickhouseDSN) + log.Debugf("Config from environment - Pixie Cluster ID: %s", pixieClusterID) + log.Debugf("Config from environment - Pixie API Key: %s", pixieAPIKey) + log.Debugf("Config from environment - Cluster Name: %s", clusterName) + log.Debugf("Config from environment - Pixie Host: %s", pixieHost) + + // If key values are not set via environment, try reading from Kubernetes + // Note: API key cannot be read from K8s (only deployment key is there), must be provided via env + if pixieClusterID == "" || clusterName == "" || pixieHost == defPixieHostname { + log.Info("Attempting to read Pixie configuration from Kubernetes resources...") + k8sClusterID, _, k8sClusterName, k8sHost, err := getK8sConfig() + if err != nil { + log.WithError(err).Warn("Failed to read configuration from Kubernetes, will use environment variables only") + } else { + // Use k8s values only if env vars are not set + if pixieClusterID == "" { + pixieClusterID = k8sClusterID + log.Debugf("Using 
cluster ID from Kubernetes: %s", pixieClusterID) + } + if clusterName == "" { + clusterName = k8sClusterName + log.Debugf("Using cluster name from Kubernetes: %s", clusterName) + } + if pixieHost == defPixieHostname && k8sHost != "" { + pixieHost = k8sHost + log.Debugf("Using host from Kubernetes: %s", pixieHost) + } + } + } + + log.Debugf("Final config - Pixie Cluster ID: %s", pixieClusterID) + log.Debugf("Final config - Pixie API Key: %s", pixieAPIKey) + log.Debugf("Final config - Cluster Name: %s", clusterName) + log.Debugf("Final config - Pixie Host: %s", pixieHost) + log.Debugf("Final config - ClickHouse DSN: %s", clickhouseDSN) + + collectInterval, err := getIntEnvWithDefault(envCollectInterval, defCollectInterval) + if err != nil { + return err + } + + instance = &config{ + settings: &settings{ + buildDate: buildDate, + commit: gitCommit, + version: integrationVersion, + }, + worker: &worker{ + clusterName: clusterName, + pixieClusterID: pixieClusterID, + collectInterval: collectInterval, + }, + clickhouse: &clickhouse{ + dsn: clickhouseDSN, + userAgent: "pixie-clickhouse/" + integrationVersion, + }, + pixie: &pixie{ + apiKey: pixieAPIKey, + clusterID: pixieClusterID, + host: pixieHost, + }, + } + return instance.validate() +} + +func getEnvWithDefault(key, defaultValue string) string { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + return value +} + +func getIntEnvWithDefault(key string, defaultValue int64) (int64, error) { + value := os.Getenv(key) + if value == "" { + return defaultValue, nil + } + i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return 0, fmt.Errorf("Environment variable %s is not an integer.", key) + } + return i, nil +} + +type Config interface { + Verbose() bool + Settings() Settings + ClickHouse() ClickHouse + Pixie() Pixie + Worker() Worker + validate() error +} + +type config struct { + verbose bool + worker Worker + clickhouse ClickHouse + pixie Pixie + settings Settings +} + +func (c 
*config) validate() error { + if err := c.Pixie().validate(); err != nil { + return fmt.Errorf("error validating pixie config: %w", err) + } + if err := c.Worker().validate(); err != nil { + return fmt.Errorf("error validating worker config: %w", err) + } + return c.ClickHouse().validate() +} + +func (c *config) Settings() Settings { + return c.settings +} + +func (c *config) Verbose() bool { + return c.verbose +} + +func (c *config) ClickHouse() ClickHouse { + return c.clickhouse +} + +func (c *config) Worker() Worker { + return c.worker +} + +func (c *config) Pixie() Pixie { + return c.pixie +} + +type Settings interface { + Version() string + Commit() string + BuildDate() string +} + +type settings struct { + buildDate string + commit string + version string +} + +func (s *settings) Version() string { + return s.version +} + +func (s *settings) Commit() string { + return s.commit +} + +func (s *settings) BuildDate() string { + return s.buildDate +} + +type ClickHouse interface { + DSN() string + UserAgent() string + validate() error +} + +type clickhouse struct { + dsn string + userAgent string +} + +func (c *clickhouse) validate() error { + if c.dsn == "" { + return fmt.Errorf("missing required env variable '%s'", envClickHouseDSN) + } + return nil +} + +func (c *clickhouse) DSN() string { + return c.dsn +} + +func (c *clickhouse) UserAgent() string { + return c.userAgent +} + +type Pixie interface { + APIKey() string + ClusterID() string + Host() string + validate() error +} + +type pixie struct { + apiKey string + clusterID string + host string +} + +func (p *pixie) validate() error { + if p.apiKey == "" { + return fmt.Errorf("missing required env variable '%s'", envPixieAPIKey) + } + if p.clusterID == "" { + return fmt.Errorf("missing required env variable '%s'", envPixieClusterID) + } + return nil +} + +func (p *pixie) APIKey() string { + return p.apiKey +} + +func (p *pixie) ClusterID() string { + return p.clusterID +} + +func (p *pixie) Host() string { + 
return p.host +} + +type Worker interface { + ClusterName() string + PixieClusterID() string + CollectInterval() int64 + validate() error +} + +type worker struct { + clusterName string + pixieClusterID string + collectInterval int64 +} + +func (a *worker) validate() error { + if a.clusterName == "" { + return fmt.Errorf("missing required env variable '%s'", envClusterName) + } + return nil +} + +func (a *worker) ClusterName() string { + return a.clusterName +} + +func (a *worker) PixieClusterID() string { + return a.pixieClusterID +} + +func (a *worker) CollectInterval() int64 { + return a.collectInterval +} diff --git a/src/vizier/services/adaptive_export/internal/config/definition.go b/src/vizier/services/adaptive_export/internal/config/definition.go new file mode 100644 index 00000000000..fd772022753 --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/config/definition.go @@ -0,0 +1,66 @@ +// Copyright 2018- The Pixie Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v2" + + "px.dev/pixie/src/vizier/services/adaptive_export/internal/script" +) + +const scriptExtension = ".yaml" + +// ReadScriptDefinitions reads the script definition from the given directory path. +// Only .yaml files are read and subdirectories are not traversed. 
+func ReadScriptDefinitions(dir string) ([]*script.ScriptDefinition, error) { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return nil, nil + } + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + var l []*script.ScriptDefinition + for _, file := range files { + if strings.HasSuffix(file.Name(), scriptExtension) { + description, err := readScriptDefinition(filepath.Join(dir, file.Name())) + if err != nil { + return nil, err + } + l = append(l, description) + } + } + return l, nil +} + +func readScriptDefinition(path string) (*script.ScriptDefinition, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var definition script.ScriptDefinition + err = yaml.Unmarshal(content, &definition) + if err != nil { + return nil, err + } + return &definition, nil +} diff --git a/src/vizier/services/adaptive_export/internal/pixie/BUILD.bazel b/src/vizier/services/adaptive_export/internal/pixie/BUILD.bazel new file mode 100644 index 00000000000..29f239170a0 --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/pixie/BUILD.bazel @@ -0,0 +1,34 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "pixie", + srcs = ["pixie.go"], + importpath = "px.dev/pixie/src/vizier/services/adaptive_export/internal/pixie", + visibility = ["//src/vizier/services/adaptive_export:__subpackages__"], + deps = [ + "//src/api/go/pxapi/utils", + "//src/api/proto/cloudpb:cloudapi_pl_go_proto", + "//src/api/proto/uuidpb:uuid_pl_go_proto", + "//src/vizier/services/adaptive_export/internal/script", + "@com_github_gogo_protobuf//types", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//metadata", + ], +) diff --git a/src/vizier/services/adaptive_export/internal/pixie/pixie.go b/src/vizier/services/adaptive_export/internal/pixie/pixie.go new file mode 100644 index 00000000000..bb761fc631d --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/pixie/pixie.go @@ -0,0 +1,256 @@ +// Copyright 2018- The Pixie Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package pixie
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"strings"
+
+	"github.com/gogo/protobuf/types"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+	"px.dev/pixie/src/api/go/pxapi/utils"
+	"px.dev/pixie/src/api/proto/cloudpb"
+	"px.dev/pixie/src/api/proto/uuidpb"
+
+	"px.dev/pixie/src/vizier/services/adaptive_export/internal/script"
+)
+
+const (
+	clickhousePluginId = "clickhouse"
+	exportUrlConfig    = "exportURL"
+)
+
+type Client struct {
+	cloudAddr string
+	ctx       context.Context
+
+	grpcConn     *grpc.ClientConn
+	pluginClient cloudpb.PluginServiceClient
+}
+
+func NewClient(ctx context.Context, apiKey string, cloudAddr string) (*Client, error) {
+	fmt.Printf("DEBUG: NewClient called with apiKey length: %d, cloudAddr: %s\n", len(apiKey), cloudAddr)
+	if apiKey == "" {
+		fmt.Println("WARNING: API key is empty!")
+	}
+
+	c := &Client{
+		cloudAddr: cloudAddr,
+		ctx:       metadata.AppendToOutgoingContext(ctx, "pixie-api-key", apiKey),
+	}
+
+	// Debug: check what's in the context
+	md, ok := metadata.FromOutgoingContext(c.ctx)
+	if ok {
+		fmt.Printf("DEBUG: Context metadata: %v\n", md)
+	} else {
+		fmt.Println("WARNING: No metadata in context!")
+	}
+
+	if err := c.init(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func (c *Client) init() error {
+	isInternal := strings.Contains(c.cloudAddr, "cluster.local")
+
+	tlsConfig := &tls.Config{InsecureSkipVerify: isInternal}
+	creds := credentials.NewTLS(tlsConfig)
+
+	conn, err := grpc.Dial(c.cloudAddr, grpc.WithTransportCredentials(creds))
+	if err != nil {
+		return err
+	}
+
+	c.grpcConn = conn
+	c.pluginClient = cloudpb.NewPluginServiceClient(conn)
+	return nil
+}
+
+func (c *Client) GetClickHousePlugin() (*cloudpb.Plugin, error) {
+	req := &cloudpb.GetPluginsRequest{
+		Kind: cloudpb.PK_RETENTION,
+	}
+	resp, err := c.pluginClient.GetPlugins(c.ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	for _, 
plugin := range resp.Plugins { + if plugin.Id == clickhousePluginId { + return plugin, nil + } + } + return nil, fmt.Errorf("the %s plugin could not be found", clickhousePluginId) +} + +type ClickHousePluginConfig struct { + ExportUrl string +} + +func (c *Client) GetClickHousePluginConfig() (*ClickHousePluginConfig, error) { + req := &cloudpb.GetOrgRetentionPluginConfigRequest{ + PluginId: clickhousePluginId, + } + resp, err := c.pluginClient.GetOrgRetentionPluginConfig(c.ctx, req) + if err != nil { + return nil, err + } + exportUrl := resp.CustomExportUrl + if exportUrl == "" { + exportUrl, err = c.getDefaultClickHouseExportUrl() + if err != nil { + return nil, err + } + } + return &ClickHousePluginConfig{ + ExportUrl: exportUrl, + }, nil +} + +func (c *Client) getDefaultClickHouseExportUrl() (string, error) { + req := &cloudpb.GetRetentionPluginInfoRequest{ + PluginId: clickhousePluginId, + } + info, err := c.pluginClient.GetRetentionPluginInfo(c.ctx, req) + if err != nil { + return "", err + } + return info.DefaultExportURL, nil +} + +func (c *Client) EnableClickHousePlugin(config *ClickHousePluginConfig, version string) error { + req := &cloudpb.UpdateRetentionPluginConfigRequest{ + PluginId: clickhousePluginId, + Configs: map[string]string{ + exportUrlConfig: config.ExportUrl, + }, + Enabled: &types.BoolValue{Value: true}, + Version: &types.StringValue{Value: version}, + CustomExportUrl: &types.StringValue{Value: config.ExportUrl}, + InsecureTLS: &types.BoolValue{Value: false}, + DisablePresets: &types.BoolValue{Value: true}, + } + _, err := c.pluginClient.UpdateRetentionPluginConfig(c.ctx, req) + return err +} + +func (c *Client) GetPresetScripts() ([]*script.ScriptDefinition, error) { + resp, err := c.pluginClient.GetRetentionScripts(c.ctx, &cloudpb.GetRetentionScriptsRequest{}) + if err != nil { + return nil, err + } + var l []*script.ScriptDefinition + for _, s := range resp.Scripts { + if s.PluginId == clickhousePluginId && s.IsPreset { + sd, err := 
c.getScriptDefinition(s) + if err != nil { + return nil, err + } + l = append(l, sd) + } + } + return l, nil +} + +func (c *Client) GetClusterScripts(clusterId, clusterName string) ([]*script.Script, error) { + resp, err := c.pluginClient.GetRetentionScripts(c.ctx, &cloudpb.GetRetentionScriptsRequest{}) + if err != nil { + return nil, err + } + var l []*script.Script + for _, s := range resp.Scripts { + if s.PluginId == clickhousePluginId { + sd, err := c.getScriptDefinition(s) + if err != nil { + return nil, err + } + l = append(l, &script.Script{ + ScriptDefinition: *sd, + ScriptId: utils.ProtoToUUIDStr(s.ScriptID), + ClusterIds: getClusterIdsAsString(s.ClusterIDs), + }) + } + } + return l, nil +} + +func getClusterIdsAsString(clusterIDs []*uuidpb.UUID) string { + scriptClusterId := "" + for i, id := range clusterIDs { + if i > 0 { + scriptClusterId = scriptClusterId + "," + } + scriptClusterId = scriptClusterId + utils.ProtoToUUIDStr(id) + } + return scriptClusterId +} + +func (c *Client) getScriptDefinition(s *cloudpb.RetentionScript) (*script.ScriptDefinition, error) { + resp, err := c.pluginClient.GetRetentionScript(c.ctx, &cloudpb.GetRetentionScriptRequest{ID: s.ScriptID}) + if err != nil { + return nil, err + } + return &script.ScriptDefinition{ + Name: s.ScriptName, + Description: s.Description, + FrequencyS: s.FrequencyS, + Script: resp.Contents, + IsPreset: s.IsPreset, + }, nil +} + +func (c *Client) AddDataRetentionScript(clusterId string, scriptName string, description string, frequencyS int64, contents string) error { + req := &cloudpb.CreateRetentionScriptRequest{ + ScriptName: scriptName, + Description: description, + FrequencyS: frequencyS, + Contents: contents, + ClusterIDs: []*uuidpb.UUID{utils.ProtoFromUUIDStrOrNil(clusterId)}, + PluginId: clickhousePluginId, + } + _, err := c.pluginClient.CreateRetentionScript(c.ctx, req) + return err +} + +func (c *Client) UpdateDataRetentionScript(clusterId string, scriptId string, scriptName string, 
description string, frequencyS int64, contents string) error { + req := &cloudpb.UpdateRetentionScriptRequest{ + ID: utils.ProtoFromUUIDStrOrNil(scriptId), + ScriptName: &types.StringValue{Value: scriptName}, + Description: &types.StringValue{Value: description}, + Enabled: &types.BoolValue{Value: true}, + FrequencyS: &types.Int64Value{Value: frequencyS}, + Contents: &types.StringValue{Value: contents}, + ClusterIDs: []*uuidpb.UUID{utils.ProtoFromUUIDStrOrNil(clusterId)}, + } + _, err := c.pluginClient.UpdateRetentionScript(c.ctx, req) + return err +} + +func (c *Client) DeleteDataRetentionScript(scriptId string) error { + req := &cloudpb.DeleteRetentionScriptRequest{ + ID: utils.ProtoFromUUIDStrOrNil(scriptId), + } + _, err := c.pluginClient.DeleteRetentionScript(c.ctx, req) + return err +} diff --git a/src/vizier/services/adaptive_export/internal/script/BUILD.bazel b/src/vizier/services/adaptive_export/internal/script/BUILD.bazel new file mode 100644 index 00000000000..28d764063a4 --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/script/BUILD.bazel @@ -0,0 +1,24 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "script", + srcs = ["script.go"], + importpath = "px.dev/pixie/src/vizier/services/adaptive_export/internal/script", + visibility = ["//src/vizier/services/adaptive_export:__subpackages__"], +) diff --git a/src/vizier/services/adaptive_export/internal/script/script.go b/src/vizier/services/adaptive_export/internal/script/script.go new file mode 100644 index 00000000000..23005ec8851 --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/script/script.go @@ -0,0 +1,114 @@ +// Copyright 2018- The Pixie Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package script + +import ( + "fmt" + "strings" +) + +const ( + scriptPrefix = "ch-" +) + +type ScriptConfig struct { + ClusterName string + ClusterId string + CollectInterval int64 +} + +type Script struct { + ScriptDefinition + ScriptId string + ClusterIds string +} + +type ScriptDefinition struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + FrequencyS int64 `yaml:"frequencyS"` + Script string `yaml:"script"` + IsPreset bool `yaml:"-"` +} + +type ScriptActions struct { + ToDelete []*Script + ToUpdate []*Script + ToCreate []*Script +} + +func IsClickHouseScript(scriptName string) bool { + return strings.HasPrefix(scriptName, scriptPrefix) +} + +func IsScriptForCluster(scriptName, clusterName string) bool { + return IsClickHouseScript(scriptName) && strings.HasSuffix(scriptName, "-"+clusterName) +} + +func GetActions(scriptDefinitions []*ScriptDefinition, currentScripts []*Script, config ScriptConfig) ScriptActions { + definitions := make(map[string]ScriptDefinition) + for _, definition := range scriptDefinitions { + scriptName := getScriptName(definition.Name, config.ClusterName) + frequencyS := getInterval(definition, config) + if frequencyS > 0 { + definitions[scriptName] = ScriptDefinition{ + Name: scriptName, + Description: definition.Description, + FrequencyS: frequencyS, + Script: templateScript(definition, config), + } + } + } + actions := ScriptActions{} + for _, current := range currentScripts { + if definition, present := definitions[current.Name]; present { + if definition.Script != current.Script || definition.FrequencyS != current.FrequencyS || config.ClusterId != current.ClusterIds { + actions.ToUpdate = append(actions.ToUpdate, &Script{ + ScriptDefinition: definition, + ScriptId: current.ScriptId, + ClusterIds: config.ClusterId, + }) + } + delete(definitions, current.Name) + } else if IsClickHouseScript(current.Name) { + actions.ToDelete = append(actions.ToDelete, current) + 
} + } + for _, definition := range definitions { + actions.ToCreate = append(actions.ToCreate, &Script{ + ScriptDefinition: definition, + ClusterIds: config.ClusterId, + }) + } + return actions +} + +func getScriptName(scriptName string, clusterName string) string { + return fmt.Sprintf("%s%s-%s", scriptPrefix, scriptName, clusterName) +} + +func getInterval(definition *ScriptDefinition, config ScriptConfig) int64 { + if definition.FrequencyS == 0 { + return config.CollectInterval + } + return definition.FrequencyS +} + +func templateScript(definition *ScriptDefinition, config ScriptConfig) string { + // Return script as-is without any processing + return definition.Script +} From b824922928681876640874ea957b9b09ba88ecd7 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 9 Nov 2025 22:44:44 +0000 Subject: [PATCH 81/86] Ensure that '.beta' suffixed DataTables are handled properly since ClickHouse treats table names with periods as namespaced Signed-off-by: Dom Del Nano --- src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index 05b1dedbe84..1ae7b9900cb 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -1332,8 +1332,9 @@ class CreateClickHouseSchemas final : public carnot::udf::UDTF names = absl::StrSplit(table_name, '.'); + if (names.size() <= 0 || names.size() > 2) { + result.status = "error"; + result.message = "Invalid table name with multiple dots"; + results_.push_back(result); + continue; + } + table_name = names[0]; + // Generate CREATE TABLE statement std::string create_table_sql = GenerateCreateTableSQL(table_name, rel, use_if_not_exists_); From 32106330dfcdb20507285ba081fb47565cbeadc7 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Sun, 9 Nov 2025 22:45:10 +0000 Subject: [PATCH 82/86] Get adaptive_export running on k8s without adaptivity 
piece Signed-off-by: Dom Del Nano --- .../bootstrap/adaptive_export_deployment.yaml | 69 +++++++++++++++++++ .../bootstrap/adaptive_export_role.yaml | 64 +++++++++++++++++ .../bootstrap/adaptive_export_secrets.yaml | 11 +++ k8s/vizier/bootstrap/kustomization.yaml | 3 + skaffold/skaffold_vizier.yaml | 7 ++ .../internal/config/BUILD.bazel | 1 + .../adaptive_export/internal/config/config.go | 21 ++++-- 7 files changed, 170 insertions(+), 6 deletions(-) create mode 100644 k8s/vizier/bootstrap/adaptive_export_deployment.yaml create mode 100644 k8s/vizier/bootstrap/adaptive_export_role.yaml create mode 100644 k8s/vizier/bootstrap/adaptive_export_secrets.yaml diff --git a/k8s/vizier/bootstrap/adaptive_export_deployment.yaml b/k8s/vizier/bootstrap/adaptive_export_deployment.yaml new file mode 100644 index 00000000000..e076c905a32 --- /dev/null +++ b/k8s/vizier/bootstrap/adaptive_export_deployment.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: adaptive-export +spec: + replicas: 1 + selector: + matchLabels: + name: adaptive-export + template: + metadata: + labels: + name: adaptive-export + plane: control + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: Exists + - key: kubernetes.io/os + operator: In + values: + - linux + - matchExpressions: + - key: beta.kubernetes.io/os + operator: Exists + - key: beta.kubernetes.io/os + operator: In + values: + - linux + serviceAccountName: pl-adaptive-export-service-account + containers: + - name: adaptive-export + image: vizier-adaptive_export_image:latest + env: + - name: PL_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PIXIE_API_KEY + valueFrom: + secretKeyRef: + name: pl-adaptive-export-secrets + key: pixie-api-key + - name: CLICKHOUSE_DSN + valueFrom: + secretKeyRef: + name: pl-adaptive-export-secrets + key: clickhouse-dsn + - name: VERBOSE + value: 
"true" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + securityContext: + runAsUser: 10100 + runAsGroup: 10100 + fsGroup: 10100 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/k8s/vizier/bootstrap/adaptive_export_role.yaml b/k8s/vizier/bootstrap/adaptive_export_role.yaml new file mode 100644 index 00000000000..33887150f37 --- /dev/null +++ b/k8s/vizier/bootstrap/adaptive_export_role.yaml @@ -0,0 +1,64 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pl-adaptive-export-service-account +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pl-adaptive-export-role +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pl-adaptive-export-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pl-adaptive-export-role +subjects: +- kind: ServiceAccount + name: pl-adaptive-export-service-account + namespace: pl +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pl-adaptive-export-cluster-role +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pl-adaptive-export-cluster-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pl-adaptive-export-cluster-role +subjects: +- kind: ServiceAccount + name: pl-adaptive-export-service-account + namespace: pl diff --git a/k8s/vizier/bootstrap/adaptive_export_secrets.yaml b/k8s/vizier/bootstrap/adaptive_export_secrets.yaml new file mode 100644 index 00000000000..92699282f6d --- /dev/null +++ b/k8s/vizier/bootstrap/adaptive_export_secrets.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
pl-adaptive-export-secrets
+type: Opaque
+stringData:
+  # Replace with your actual Pixie API key from https://work.withpixie.ai
+  pixie-api-key: "YOUR_PIXIE_API_KEY"
+  # Replace with your ClickHouse DSN: clickhouse://user:password@host:port/database
+  clickhouse-dsn: "user:password@host:port/database"
diff --git a/k8s/vizier/bootstrap/kustomization.yaml b/k8s/vizier/bootstrap/kustomization.yaml
index 714f5676426..e373c6bbfe3 100644
--- a/k8s/vizier/bootstrap/kustomization.yaml
+++ b/k8s/vizier/bootstrap/kustomization.yaml
@@ -15,3 +15,6 @@ resources:
 - cert_provisioner_role.yaml
 - cert_provisioner_job.yaml
 - vizier_crd_role.yaml
+- adaptive_export_role.yaml
+- adaptive_export_secrets.yaml
+- adaptive_export_deployment.yaml
diff --git a/skaffold/skaffold_vizier.yaml b/skaffold/skaffold_vizier.yaml
index 3efa172649a..33389dffb2e 100644
--- a/skaffold/skaffold_vizier.yaml
+++ b/skaffold/skaffold_vizier.yaml
@@ -45,6 +45,13 @@ build:
       args:
       - --config=x86_64_sysroot
       - --compilation_mode=opt
+  - image: vizier-adaptive_export_image
+    context: . 
+ bazel: + target: //src/vizier/services/adaptive_export:adaptive_export_image.tar + args: + - --config=x86_64_sysroot + - --compilation_mode=opt tagPolicy: dateTime: {} local: diff --git a/src/vizier/services/adaptive_export/internal/config/BUILD.bazel b/src/vizier/services/adaptive_export/internal/config/BUILD.bazel index 413451fc77b..4d19f27afab 100644 --- a/src/vizier/services/adaptive_export/internal/config/BUILD.bazel +++ b/src/vizier/services/adaptive_export/internal/config/BUILD.bazel @@ -31,5 +31,6 @@ go_library( "@in_gopkg_yaml_v2//:yaml_v2", "@io_k8s_apimachinery//pkg/apis/meta/v1:meta", "@io_k8s_client_go//kubernetes", + "@io_k8s_client_go//rest", ], ) diff --git a/src/vizier/services/adaptive_export/internal/config/config.go b/src/vizier/services/adaptive_export/internal/config/config.go index 9542a01437e..8f777615f3f 100644 --- a/src/vizier/services/adaptive_export/internal/config/config.go +++ b/src/vizier/services/adaptive_export/internal/config/config.go @@ -27,6 +27,7 @@ import ( log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "px.dev/pixie/src/utils/shared/k8s" ) @@ -71,14 +72,22 @@ func findVizierNamespace(clientset *kubernetes.Clientset) (string, error) { // getK8sConfig attempts to read configuration from Kubernetes secrets and configmaps. // Returns (clusterID, apiKey, clusterName, host, error). 
func getK8sConfig() (string, string, string, string, error) { - config := k8s.GetConfig() - if config == nil { - return "", "", "", "", fmt.Errorf("unable to get kubernetes config") + // Try in-cluster config first (when running in K8s) + config, err := rest.InClusterConfig() + if err != nil { + log.WithError(err).Debug("In-cluster config not available, trying kubeconfig...") + // Fall back to kubeconfig for local/adhoc testing + config = k8s.GetConfig() + if config == nil { + return "", "", "", "", fmt.Errorf("unable to get kubernetes config") + } + } else { + log.Debug("Using in-cluster Kubernetes config") } - clientset := k8s.GetClientset(config) - if clientset == nil { - return "", "", "", "", fmt.Errorf("unable to get kubernetes clientset") + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return "", "", "", "", fmt.Errorf("unable to create kubernetes clientset: %w", err) } vzNs, err := findVizierNamespace(clientset) From e39ef30363eb97c69fe4b27974b153f662a7e440 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Mon, 10 Nov 2025 00:04:28 +0000 Subject: [PATCH 83/86] Test adaptive_export end to end Signed-off-by: Dom Del Nano --- .../bootstrap/adaptive_export_deployment.yaml | 4 + .../services/adaptive_export/BUILD.bazel | 2 + .../services/adaptive_export/cmd/main.go | 182 ++++++++++++++++-- .../adaptive_export/internal/config/config.go | 68 +++++-- .../adaptive_export/internal/pixie/pixie.go | 9 - .../adaptive_export/internal/pxl/BUILD.bazel | 30 +++ .../adaptive_export/internal/pxl/pxl.go | 80 ++++++++ 7 files changed, 326 insertions(+), 49 deletions(-) create mode 100644 src/vizier/services/adaptive_export/internal/pxl/BUILD.bazel create mode 100644 src/vizier/services/adaptive_export/internal/pxl/pxl.go diff --git a/k8s/vizier/bootstrap/adaptive_export_deployment.yaml b/k8s/vizier/bootstrap/adaptive_export_deployment.yaml index e076c905a32..c407804b6c1 100644 --- a/k8s/vizier/bootstrap/adaptive_export_deployment.yaml +++ 
b/k8s/vizier/bootstrap/adaptive_export_deployment.yaml @@ -53,6 +53,10 @@ spec: key: clickhouse-dsn - name: VERBOSE value: "true" + - name: DETECTION_INTERVAL_SEC + value: "10" + - name: DETECTION_LOOKBACK_SEC + value: "30" securityContext: allowPrivilegeEscalation: false capabilities: diff --git a/src/vizier/services/adaptive_export/BUILD.bazel b/src/vizier/services/adaptive_export/BUILD.bazel index 355a1ec2117..4dc4aa3a5f7 100644 --- a/src/vizier/services/adaptive_export/BUILD.bazel +++ b/src/vizier/services/adaptive_export/BUILD.bazel @@ -23,8 +23,10 @@ go_library( importpath = "px.dev/pixie/src/vizier/services/adaptive_export", visibility = ["//visibility:private"], deps = [ + "//src/api/go/pxapi", "//src/vizier/services/adaptive_export/internal/config", "//src/vizier/services/adaptive_export/internal/pixie", + "//src/vizier/services/adaptive_export/internal/pxl", "//src/vizier/services/adaptive_export/internal/script", "@com_github_sirupsen_logrus//:logrus", ], diff --git a/src/vizier/services/adaptive_export/cmd/main.go b/src/vizier/services/adaptive_export/cmd/main.go index bfd39aff56c..b283fe8083b 100644 --- a/src/vizier/services/adaptive_export/cmd/main.go +++ b/src/vizier/services/adaptive_export/cmd/main.go @@ -20,43 +20,180 @@ import ( "context" "fmt" "os" + "os/signal" + "syscall" "time" log "github.com/sirupsen/logrus" + "px.dev/pixie/src/api/go/pxapi" "px.dev/pixie/src/vizier/services/adaptive_export/internal/config" "px.dev/pixie/src/vizier/services/adaptive_export/internal/pixie" + "px.dev/pixie/src/vizier/services/adaptive_export/internal/pxl" "px.dev/pixie/src/vizier/services/adaptive_export/internal/script" ) const ( - defaultRetries = 100 - defaultSleepTime = 15 * time.Second + defaultRetries = 100 + defaultSleepTime = 15 * time.Second + schemaCreationInterval = 2 * time.Minute + setupTimeout = 30 * time.Second + scriptExecutionTimeout = 60 * time.Second +) + +const ( + // TODO(ddelnano): Clickhouse configuration should come from plugin config. 
+ schemaCreationScript = ` +import px +px.display(px.CreateClickHouseSchemas( + host="hyperdx-hdx-oss-v2-clickhouse.click.svc.cluster.local", + port=9000, + username="otelcollector", + password="otelcollectorpass", + database="default" +)) +` + detectionScript = ` +import px + +df = px.DataFrame('kubescape_logs', clickhouse_dsn='otelcollector:otelcollectorpass@hyperdx-hdx-oss-v2-clickhouse.click.svc.cluster.local:9000/default', start_time='-%ds') +df.alert = df.message +df.namespace = px.pluck(df.RuntimeK8sDetails, "podNamespace") +df.podName = px.pluck(df.RuntimeK8sDetails, "podName") +df.time_ = px.int64_to_time(df.event_time * 1000000000) +df = df[['time_', 'alert', 'namespace', 'podName']] +px.display(df) +` ) func main() { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - log.Info("Starting the setup of the ClickHouse Pixie plugin") + log.Info("Starting the ClickHouse Adaptive Export service") cfg, err := config.GetConfig() if err != nil { - log.Error(err) - os.Exit(1) + log.WithError(err).Fatal("failed to load configuration") } clusterId := cfg.Pixie().ClusterID() clusterName := cfg.Worker().ClusterName() - log.Infof("Setting up Pixie plugin for cluster-id %s", clusterId) - client, err := setupPixie(ctx, cfg.Pixie(), defaultRetries, defaultSleepTime) + // Setup Pixie Plugin API client + log.Infof("Setting up Pixie plugin API client for cluster-id %s", clusterId) + pluginClient, err := setupPixie(ctx, cfg.Pixie(), defaultRetries, defaultSleepTime) if err != nil { - log.WithError(err).Fatal("setting up Pixie client failed") + log.WithError(err).Fatal("setting up Pixie plugin client failed") + } + + // Setup Pixie pxapi client for executing PxL scripts + log.Info("Setting up Pixie pxapi client") + // Use parent context - client stores this and uses it for all subsequent operations + pxClient, err := pxapi.NewClient(ctx, pxapi.WithAPIKey(cfg.Pixie().APIKey()), pxapi.WithCloudAddr(cfg.Pixie().Host())) + if 
err != nil { + log.WithError(err).Fatal("failed to create pxapi client") + } + + // Start schema creation background task + go runSchemaCreationTask(ctx, pxClient, clusterId) + + // Start detection script that monitors for when to enable persistence + go runDetectionTask(ctx, pxClient, pluginClient, cfg, clusterId, clusterName) + + // Wait for signal to shutdown + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + <-sigCh + + log.Info("Shutting down adaptive export service") + cancel() + time.Sleep(1 * time.Second) +} + +func runSchemaCreationTask(ctx context.Context, client *pxapi.Client, clusterID string) { + ticker := time.NewTicker(schemaCreationInterval) + defer ticker.Stop() + + // Run immediately on startup + log.Info("Running schema creation script") + execCtx, cancel := context.WithTimeout(ctx, scriptExecutionTimeout) + if _, err := pxl.ExecuteScript(execCtx, client, clusterID, schemaCreationScript); err != nil { + log.WithError(err).Error("failed to execute schema creation script") + } else { + log.Info("Schema creation script completed successfully") } + cancel() + for { + select { + case <-ctx.Done(): + log.Info("Schema creation task shutting down") + return + case <-ticker.C: + log.Info("Running schema creation script") + execCtx, cancel := context.WithTimeout(ctx, scriptExecutionTimeout) + if _, err := pxl.ExecuteScript(execCtx, client, clusterID, schemaCreationScript); err != nil { + log.WithError(err).Error("failed to execute schema creation script") + } else { + log.Info("Schema creation script completed successfully") + } + cancel() + } + } +} + +func runDetectionTask(ctx context.Context, pxClient *pxapi.Client, pluginClient *pixie.Client, cfg config.Config, clusterID string, clusterName string) { + detectionInterval := time.Duration(cfg.Worker().DetectionInterval()) * time.Second + detectionLookback := cfg.Worker().DetectionLookback() + + ticker := time.NewTicker(detectionInterval) + defer ticker.Stop() + + 
pluginEnabled := false + + for { + select { + case <-ctx.Done(): + log.Info("Detection task shutting down") + return + case <-ticker.C: + log.Info("Running detection script") + // Run detection script with lookback period + detectionPxl := fmt.Sprintf(detectionScript, detectionLookback) + execCtx, cancel := context.WithTimeout(ctx, scriptExecutionTimeout) + recordCount, err := pxl.ExecuteScript(execCtx, pxClient, clusterID, detectionPxl) + cancel() + + if err != nil { + log.WithError(err).Error("failed to execute detection script") + continue + } + + log.Debugf("Detection script returned %d records", recordCount) + + // If we have records and plugin is not enabled, enable it + if recordCount > 0 && !pluginEnabled { + log.Info("Detection script returned records - enabling forensic export") + pluginCtx, pluginCancel := context.WithTimeout(ctx, 2*time.Minute) + if err := enableClickHousePlugin(pluginCtx, pluginClient, cfg, clusterID, clusterName); err != nil { + log.WithError(err).Error("failed to enable forensic export") + } else { + pluginEnabled = true + log.Info("Forensic export enabled successfully") + } + pluginCancel() + } else if recordCount > 0 && pluginEnabled { + log.Info("Detection script returned records but forensic export already enabled, no action taken") + } + } + } +} + +func enableClickHousePlugin(ctx context.Context, client *pixie.Client, cfg config.Config, clusterID string, clusterName string) error { log.Info("Checking the current ClickHouse plugin configuration") plugin, err := client.GetClickHousePlugin() if err != nil { - log.WithError(err).Fatal("getting data retention plugins failed") + return fmt.Errorf("getting data retention plugins failed: %w", err) } enablePlugin := true @@ -64,7 +201,7 @@ func main() { enablePlugin = false config, err := client.GetClickHousePluginConfig() if err != nil { - log.WithError(err).Fatal("getting ClickHouse plugin config failed") + return fmt.Errorf("getting ClickHouse plugin config failed: %w", err) } if 
config.ExportUrl != cfg.ClickHouse().DSN() { log.Info("ClickHouse plugin is configured with different DSN... Overwriting") @@ -78,7 +215,7 @@ func main() { ExportUrl: cfg.ClickHouse().DSN(), }, plugin.LatestVersion) if err != nil { - log.WithError(err).Fatal("failed to enabled ClickHouse plugin") + return fmt.Errorf("failed to enable ClickHouse plugin: %w", err) } } @@ -87,20 +224,20 @@ func main() { log.Info("Getting preset script from the Pixie plugin") defsFromPixie, err := client.GetPresetScripts() if err != nil { - log.WithError(err).Fatal("failed to get preset scripts") + return fmt.Errorf("failed to get preset scripts: %w", err) } definitions := defsFromPixie log.Infof("Getting current scripts for cluster") - currentScripts, err := client.GetClusterScripts(clusterId, clusterName) + currentScripts, err := client.GetClusterScripts(clusterID, clusterName) if err != nil { - log.WithError(err).Fatal("failed to get data retention scripts") + return fmt.Errorf("failed to get data retention scripts: %w", err) } actions := script.GetActions(definitions, currentScripts, script.ScriptConfig{ ClusterName: clusterName, - ClusterId: clusterId, + ClusterId: clusterID, CollectInterval: cfg.Worker().CollectInterval(), }) @@ -116,7 +253,7 @@ func main() { for _, s := range actions.ToUpdate { log.Infof("Updating script %s", s.Name) - err := client.UpdateDataRetentionScript(clusterId, s.ScriptId, s.Name, s.Description, s.FrequencyS, s.Script) + err := client.UpdateDataRetentionScript(clusterID, s.ScriptId, s.Name, s.Description, s.FrequencyS, s.Script) if err != nil { errs = append(errs, err) } @@ -124,18 +261,18 @@ func main() { for _, s := range actions.ToCreate { log.Infof("Creating script %s", s.Name) - err := client.AddDataRetentionScript(clusterId, s.Name, s.Description, s.FrequencyS, s.Script) + err := client.AddDataRetentionScript(clusterID, s.Name, s.Description, s.FrequencyS, s.Script) if err != nil { errs = append(errs, err) } } if len(errs) > 0 { - 
log.Fatalf("errors while setting up data retention scripts: %v", errs) + return fmt.Errorf("errors while setting up data retention scripts: %v", errs) } log.Info("All done! The ClickHouse plugin is now configured.") - os.Exit(0) + return nil } func setupPixie(ctx context.Context, cfg config.Pixie, tries int, sleepTime time.Duration) (*pixie.Client, error) { @@ -144,13 +281,16 @@ func setupPixie(ctx context.Context, cfg config.Pixie, tries int, sleepTime time log.Infof("setupPixie: API Key length=%d, Host=%s", len(apiKey), host) for tries > 0 { + // Use parent context - client stores this and uses it for all subsequent operations client, err := pixie.NewClient(ctx, apiKey, host) if err == nil { return client, nil } tries -= 1 log.WithError(err).Warning("error creating Pixie API client") - time.Sleep(sleepTime) + if tries > 0 { + time.Sleep(sleepTime) + } } return nil, fmt.Errorf("exceeded maximum number of retries") } diff --git a/src/vizier/services/adaptive_export/internal/config/config.go b/src/vizier/services/adaptive_export/internal/config/config.go index 8f777615f3f..fc500359dfe 100644 --- a/src/vizier/services/adaptive_export/internal/config/config.go +++ b/src/vizier/services/adaptive_export/internal/config/config.go @@ -33,16 +33,20 @@ import ( ) const ( - envVerbose = "VERBOSE" - envClickHouseDSN = "CLICKHOUSE_DSN" - envPixieClusterID = "PIXIE_CLUSTER_ID" - envPixieEndpoint = "PIXIE_ENDPOINT" - envPixieAPIKey = "PIXIE_API_KEY" - envClusterName = "CLUSTER_NAME" - envCollectInterval = "COLLECT_INTERVAL_SEC" - defPixieHostname = "work.withpixie.ai:443" - boolTrue = "true" - defCollectInterval = 30 + envVerbose = "VERBOSE" + envClickHouseDSN = "CLICKHOUSE_DSN" + envPixieClusterID = "PIXIE_CLUSTER_ID" + envPixieEndpoint = "PIXIE_ENDPOINT" + envPixieAPIKey = "PIXIE_API_KEY" + envClusterName = "CLUSTER_NAME" + envCollectInterval = "COLLECT_INTERVAL_SEC" + envDetectionInterval = "DETECTION_INTERVAL_SEC" + envDetectionLookback = "DETECTION_LOOKBACK_SEC" + 
defPixieHostname = "work.withpixie.ai:443" + boolTrue = "true" + defCollectInterval = 30 + defDetectionInterval = 10 + defDetectionLookback = 15 ) var ( @@ -138,9 +142,6 @@ func GetConfig() (Config, error) { func setUpConfig() error { log.SetLevel(log.InfoLevel) - if strings.EqualFold(os.Getenv(envVerbose), boolTrue) { - log.SetLevel(log.DebugLevel) - } // Try to read configuration from environment variables first clickhouseDSN := os.Getenv(envClickHouseDSN) @@ -148,6 +149,11 @@ func setUpConfig() error { pixieAPIKey := os.Getenv(envPixieAPIKey) clusterName := os.Getenv(envClusterName) pixieHost := getEnvWithDefault(envPixieEndpoint, defPixieHostname) + enableDebug := os.Getenv(envVerbose) + + if strings.EqualFold(enableDebug, boolTrue) { + log.SetLevel(log.DebugLevel) + } log.Debugf("Config from environment - ClickHouse DSN: %s", clickhouseDSN) log.Debugf("Config from environment - Pixie Cluster ID: %s", pixieClusterID) @@ -190,6 +196,16 @@ func setUpConfig() error { return err } + detectionInterval, err := getIntEnvWithDefault(envDetectionInterval, defDetectionInterval) + if err != nil { + return err + } + + detectionLookback, err := getIntEnvWithDefault(envDetectionLookback, defDetectionLookback) + if err != nil { + return err + } + instance = &config{ settings: &settings{ buildDate: buildDate, @@ -197,9 +213,11 @@ func setUpConfig() error { version: integrationVersion, }, worker: &worker{ - clusterName: clusterName, - pixieClusterID: pixieClusterID, - collectInterval: collectInterval, + clusterName: clusterName, + pixieClusterID: pixieClusterID, + collectInterval: collectInterval, + detectionInterval: detectionInterval, + detectionLookback: detectionLookback, }, clickhouse: &clickhouse{ dsn: clickhouseDSN, @@ -370,13 +388,17 @@ type Worker interface { ClusterName() string PixieClusterID() string CollectInterval() int64 + DetectionInterval() int64 + DetectionLookback() int64 validate() error } type worker struct { - clusterName string - pixieClusterID string - 
collectInterval int64 + clusterName string + pixieClusterID string + collectInterval int64 + detectionInterval int64 + detectionLookback int64 } func (a *worker) validate() error { @@ -397,3 +419,11 @@ func (a *worker) PixieClusterID() string { func (a *worker) CollectInterval() int64 { return a.collectInterval } + +func (a *worker) DetectionInterval() int64 { + return a.detectionInterval +} + +func (a *worker) DetectionLookback() int64 { + return a.detectionLookback +} diff --git a/src/vizier/services/adaptive_export/internal/pixie/pixie.go b/src/vizier/services/adaptive_export/internal/pixie/pixie.go index bb761fc631d..97e5bb8ae23 100644 --- a/src/vizier/services/adaptive_export/internal/pixie/pixie.go +++ b/src/vizier/services/adaptive_export/internal/pixie/pixie.go @@ -47,7 +47,6 @@ type Client struct { } func NewClient(ctx context.Context, apiKey string, cloudAddr string) (*Client, error) { - fmt.Printf("DEBUG: NewClient called with apiKey length: %d, cloudAddr: %s\n", len(apiKey), cloudAddr) if apiKey == "" { fmt.Println("WARNING: API key is empty!") } @@ -57,14 +56,6 @@ func NewClient(ctx context.Context, apiKey string, cloudAddr string) (*Client, e ctx: metadata.AppendToOutgoingContext(ctx, "pixie-api-key", apiKey), } - // Debug: check what's in the context - md, ok := metadata.FromOutgoingContext(c.ctx) - if ok { - fmt.Printf("DEBUG: Context metadata: %v\n", md) - } else { - fmt.Println("WARNING: No metadata in context!") - } - if err := c.init(); err != nil { return nil, err } diff --git a/src/vizier/services/adaptive_export/internal/pxl/BUILD.bazel b/src/vizier/services/adaptive_export/internal/pxl/BUILD.bazel new file mode 100644 index 00000000000..80afa3f2875 --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/pxl/BUILD.bazel @@ -0,0 +1,30 @@ +# Copyright 2018- The Pixie Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "pxl", + srcs = ["pxl.go"], + importpath = "px.dev/pixie/src/vizier/services/adaptive_export/internal/pxl", + visibility = ["//src/vizier/services/adaptive_export:__subpackages__"], + deps = [ + "//src/api/go/pxapi", + "//src/api/go/pxapi/errdefs", + "//src/api/go/pxapi/types", + "@com_github_sirupsen_logrus//:logrus", + ], +) diff --git a/src/vizier/services/adaptive_export/internal/pxl/pxl.go b/src/vizier/services/adaptive_export/internal/pxl/pxl.go new file mode 100644 index 00000000000..e4e27a40b6b --- /dev/null +++ b/src/vizier/services/adaptive_export/internal/pxl/pxl.go @@ -0,0 +1,80 @@ +// Copyright 2018- The Pixie Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package pxl + +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "px.dev/pixie/src/api/go/pxapi" + "px.dev/pixie/src/api/go/pxapi/errdefs" + "px.dev/pixie/src/api/go/pxapi/types" +) + +// recordCounter counts the number of records received +type recordCounter struct { + count int +} + +func (r *recordCounter) HandleInit(ctx context.Context, metadata types.TableMetadata) error { + return nil +} + +func (r *recordCounter) HandleRecord(ctx context.Context, record *types.Record) error { + r.count++ + return nil +} + +func (r *recordCounter) HandleDone(ctx context.Context) error { + return nil +} + +type recordCounterMux struct { + counter *recordCounter +} + +func (m *recordCounterMux) AcceptTable(ctx context.Context, metadata types.TableMetadata) (pxapi.TableRecordHandler, error) { + return m.counter, nil +} + +// ExecuteScript executes a PxL script and returns the number of records returned +func ExecuteScript(ctx context.Context, client *pxapi.Client, clusterID string, pxl string) (int, error) { + vz, err := client.NewVizierClient(ctx, clusterID) + if err != nil { + return 0, fmt.Errorf("failed to create vizier client: %w", err) + } + + counter := &recordCounter{} + tm := &recordCounterMux{counter: counter} + + resultSet, err := vz.ExecuteScript(ctx, pxl, tm) + if err != nil { + return 0, fmt.Errorf("failed to execute script: %w", err) + } + defer resultSet.Close() + + if err := resultSet.Stream(); err != nil { + if errdefs.IsCompilationError(err) { + return 0, fmt.Errorf("PxL compilation error: %w", err) + } + return 0, fmt.Errorf("error streaming results: %w", err) + } + + log.Debugf("Script execution time: %v, bytes received: %v", resultSet.Stats().ExecutionTime, resultSet.Stats().TotalBytes) + return counter.count, nil +} From 0f066fda8344fb12030b32ade8b3859a2f2c505c Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Mon, 10 Nov 2025 00:24:24 +0000 Subject: [PATCH 84/86] Add placeholder value for 
later sed Signed-off-by: Dom Del Nano --- k8s/vizier/bootstrap/adaptive_export_secrets.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/vizier/bootstrap/adaptive_export_secrets.yaml b/k8s/vizier/bootstrap/adaptive_export_secrets.yaml index 92699282f6d..19be138743b 100644 --- a/k8s/vizier/bootstrap/adaptive_export_secrets.yaml +++ b/k8s/vizier/bootstrap/adaptive_export_secrets.yaml @@ -6,6 +6,6 @@ metadata: type: Opaque stringData: # Replace with your actual Pixie API key from https://work.withpixie.ai - pixie-api-key: "px-api-8c552c92-962d-4d9e-be6a-4fe310144497" + pixie-api-key: "PIXIE_API_KEY_PLACEHOLDER" # Replace with your ClickHouse DSN: clickhouse://user:password@host:port/database clickhouse-dsn: "otelcollector:otelcollectorpass@hyperdx-hdx-oss-v2-clickhouse.click.svc.cluster.local:9000/default" From 50f0cdcbc3b44b29e8cb62749f9112ea284fe709 Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Tue, 3 Feb 2026 19:02:12 -0800 Subject: [PATCH 85/86] Remove file source, sink results and other vestigial changes Signed-off-by: Dom Del Nano --- .arclint | 1 - BUILD.bazel | 17 - demos/log-generator/log-generator.yaml | 89 - .../operator/opensearch_operator.yaml | 8850 ----------------- .../standalone_pem_example/example.go | 66 +- src/carnot/BUILD.bazel | 2 + src/carnot/carnot_test.cc | 2 +- src/carnot/exec/BUILD.bazel | 2 - src/carnot/exec/exec_graph_test.cc | 245 +- src/carnot/exec/exec_node.h | 121 +- src/carnot/exec/exec_state.h | 12 +- src/carnot/exec/grpc_sink_node_benchmark.cc | 3 +- src/carnot/exec/grpc_sink_node_test.cc | 22 +- src/carnot/exec/memory_sink_node.cc | 3 +- src/carnot/exec/memory_sink_node_test.cc | 6 +- src/carnot/exec/memory_source_node.cc | 6 +- src/carnot/exec/memory_source_node.h | 2 +- src/carnot/exec/memory_source_node_test.cc | 12 +- src/carnot/exec/otel_export_sink_node.cc | 3 +- src/carnot/exec/otel_export_sink_node_test.cc | 39 +- src/carnot/exec/test_utils.h | 10 +- src/carnot/funcs/builtins/builtins.cc | 2 - 
src/carnot/funcs/builtins/pipeline_ops.cc | 39 - src/carnot/funcs/builtins/pipeline_ops.h | 83 - src/carnot/plan/operators.cc | 24 +- src/carnot/plan/operators.h | 1 + src/carnot/planner/cgo_export_test.cc | 37 - src/carnot/planner/compiler/BUILD.bazel | 1 - src/carnot/planner/compiler/ast_visitor.cc | 2 - src/carnot/planner/compiler/ast_visitor.h | 1 - src/carnot/planner/compiler/test_utils.h | 8 - .../distributed/coordinator/coordinator.cc | 7 - .../coordinator/coordinator_test.cc | 81 +- .../prune_unavailable_sources_rule.cc | 3 +- .../distributed_plan/distributed_plan.cc | 4 - .../distributed/distributed_planner_test.cc | 72 - .../distributed_stitcher_rules_test.cc | 62 +- .../distributedpb/distributed_plan.pb.go | 757 +- src/carnot/planner/file_source/BUILD.bazel | 52 - src/carnot/planner/file_source/file_source.cc | 27 - src/carnot/planner/file_source/file_source.h | 37 - .../planner/file_source/file_source_test.cc | 91 - src/carnot/planner/file_source/ir/BUILD.bazel | 41 - .../planner/file_source/ir/logical.pb.go | 567 -- .../planner/file_source/ir/logical.proto | 39 - src/carnot/planner/file_source/log_module.cc | 104 - src/carnot/planner/file_source/log_module.h | 69 - .../planner/ir/clickhouse_export_sink_ir.cc | 1 - .../planner/ir/clickhouse_export_sink_ir.h | 5 +- src/carnot/planner/ir/grpc_sink_ir.cc | 3 - src/carnot/planner/ir/grpc_sink_ir.h | 16 +- src/carnot/planner/ir/ir.h | 20 +- src/carnot/planner/ir/memory_sink_ir.cc | 2 - src/carnot/planner/ir/memory_sink_ir.h | 5 +- src/carnot/planner/ir/memory_source_ir.cc | 1 - src/carnot/planner/ir/memory_source_ir.h | 4 +- src/carnot/planner/ir/operator_ir.h | 34 - src/carnot/planner/ir/otel_export_sink_ir.cc | 71 - src/carnot/planner/ir/otel_export_sink_ir.h | 17 +- .../planner/ir/otel_export_sink_ir_test.cc | 133 - src/carnot/planner/logical_planner_test.cc | 6 - src/carnot/planner/objects/BUILD.bazel | 1 - src/carnot/planner/objects/dataframe.cc | 34 +- src/carnot/planner/objects/dataframe.h | 23 +- 
src/carnot/planner/objects/otel.cc | 79 +- src/carnot/planner/objects/otel.h | 54 +- src/carnot/planner/objects/otel_test.cc | 97 - src/carnot/planner/plannerpb/BUILD.bazel | 3 - src/carnot/planner/plannerpb/service.pb.go | 1255 +-- src/carnot/planner/plannerpb/service.proto | 10 - src/carnot/planner/probes/BUILD.bazel | 1 - src/carnot/planner/probes/probes.cc | 28 - src/carnot/planner/probes/probes.h | 35 - .../planner/probes/tracepoint_generator.cc | 18 +- .../planner/probes/tracepoint_generator.h | 6 - src/carnot/planner/test_utils.h | 312 - src/carnot/planpb/plan.pb.go | 4424 +++++--- src/carnot/planpb/plan.proto | 1 - src/carnot/planpb/test_proto.h | 8 - src/common/json/json.h | 21 - src/experimental/standalone_pem/BUILD.bazel | 1 - .../standalone_pem/file_source_manager.cc | 195 - .../standalone_pem/file_source_manager.h | 71 - .../standalone_pem/standalone_pem_manager.cc | 37 +- .../standalone_pem/standalone_pem_manager.h | 8 - .../standalone_pem/tracepoint_manager.cc | 5 +- .../standalone_pem/vizier_server.h | 41 +- .../px/pipeline_flow_graph/manifest.yaml | 4 - .../pipeline_flow_graph.pxl | 82 - .../px/pipeline_flow_graph/vis.json | 49 - src/shared/metadata/metadata_state.cc | 2 +- src/shared/metadata/metadata_state.h | 5 +- .../metadata/standalone_state_manager.h | 4 +- src/shared/metadata/state_manager.h | 4 +- src/shared/schema/utils.cc | 12 +- src/shared/schema/utils.h | 7 +- src/stirling/BUILD.bazel | 1 - src/stirling/core/BUILD.bazel | 1 - src/stirling/core/info_class_manager.cc | 6 +- src/stirling/core/info_class_manager.h | 10 - src/stirling/core/info_class_manager_test.cc | 1 - src/stirling/core/source_connector.cc | 4 +- src/stirling/proto/stirling.proto | 1 - .../source_connectors/file_source/BUILD.bazel | 60 - .../file_source/file_source_connector.cc | 287 - .../file_source/file_source_connector.h | 87 - .../file_source/file_source_connector_test.cc | 82 - .../file_source/stirling_fs_test.cc | 225 - .../file_source/testdata/kern.log | 5 - 
.../file_source/testdata/test.json | 10 - .../file_source/testdata/unsupported.json | 1 - .../socket_tracer/BUILD.bazel | 2 +- .../testing/container_images/BUILD.bazel | 4 +- .../stirling_error/BUILD.bazel | 3 +- .../stirling_error/sink_results_table.h | 51 - .../stirling_error/stirling_error_bpf_test.cc | 92 - .../stirling_error_connector.cc | 21 +- .../stirling_error/stirling_error_connector.h | 7 +- .../stirling_error/stream_status_table.h | 51 - .../stirling_error/testdata/test.json | 10 - .../stirling_error/testdata/unsupported.json | 1 - src/stirling/stirling.cc | 186 +- src/stirling/stirling.h | 4 - src/stirling/testing/common.h | 2 +- src/stirling/testing/overloads.h | 12 +- src/stirling/testing/stirling_mock.h | 5 - src/stirling/utils/monitor.cc | 11 - src/stirling/utils/monitor.h | 14 - src/table_store/schema/relation.cc | 14 - src/table_store/schema/relation.h | 4 - src/table_store/schemapb/schema.pb.go | 165 +- src/table_store/schemapb/schema.proto | 2 - .../internal/store_with_row_accounting.h | 44 +- src/table_store/table/table.cc | 310 +- src/table_store/table/table.h | 571 +- src/table_store/table/table_benchmark.cc | 32 +- src/table_store/table/table_store.cc | 2 +- src/table_store/table/table_store_test.cc | 10 +- src/table_store/table/table_test.cc | 343 +- src/table_store/table/tablets_group.cc | 2 +- src/table_store/table/tablets_group_test.cc | 4 +- src/table_store/test_utils.h | 2 +- src/ui/src/utils/pxl.ts | 2 - src/vizier/funcs/context/vizier_context.h | 9 +- src/vizier/funcs/md_udtfs/md_udtfs.cc | 2 - src/vizier/funcs/md_udtfs/md_udtfs_impl.h | 154 +- src/vizier/messages/messagespb/BUILD.bazel | 3 - src/vizier/messages/messagespb/messages.pb.go | 2060 +--- src/vizier/messages/messagespb/messages.proto | 32 - .../services/agent/kelvin/kelvin_manager.h | 1 - .../services/agent/pem/file_source_manager.cc | 234 - .../services/agent/pem/file_source_manager.h | 73 - src/vizier/services/agent/pem/pem_manager.cc | 25 +- 
src/vizier/services/agent/pem/pem_manager.h | 2 - .../services/agent/pem/tracepoint_manager.cc | 6 +- .../agent/pem/tracepoint_manager_test.cc | 12 +- .../services/agent/shared/manager/BUILD.bazel | 1 - .../agent/shared/manager/chan_cache.h | 2 +- .../agent/shared/manager/heartbeat.cc | 3 +- .../services/agent/shared/manager/heartbeat.h | 21 - .../agent/shared/manager/heartbeat_test.cc | 6 +- .../services/agent/shared/manager/manager.cc | 18 +- .../services/agent/shared/manager/manager.h | 2 - .../shared/manager/relation_info_manager.cc | 3 - .../shared/manager/relation_info_manager.h | 3 +- .../manager/relation_info_manager_test.cc | 8 +- src/vizier/services/metadata/BUILD.bazel | 1 - .../services/metadata/controllers/BUILD.bazel | 3 - .../controllers/agent_topic_listener.go | 44 +- .../controllers/agent_topic_listener_test.go | 102 +- .../controllers/file_source/BUILD.bazel | 74 - .../controllers/file_source/file_source.go | 375 - .../file_source/file_source_store.go | 309 - .../file_source/file_source_store_test.go | 364 - .../file_source/file_source_test.go | 528 - .../metadata/controllers/file_source/mock.go | 21 - .../controllers/file_source/mock/BUILD.bazel | 29 - .../file_source/mock/mock_file_source.gen.go | 277 - .../metadata/controllers/message_bus.go | 12 +- .../services/metadata/controllers/server.go | 186 +- .../metadata/controllers/server_test.go | 55 +- .../services/metadata/local/BUILD.bazel | 33 - .../metadata/local/local_metadata_service.h | 222 - .../services/metadata/metadata_server.go | 11 +- .../services/metadata/metadatapb/BUILD.bazel | 15 +- .../metadata/metadatapb/service.pb.go | 5938 ++++------- .../metadata/metadatapb/service.proto | 64 - .../services/metadata/storepb/BUILD.bazel | 3 - .../services/metadata/storepb/store.pb.go | 1141 +-- .../services/metadata/storepb/store.proto | 25 - .../query_broker/controllers/BUILD.bazel | 1 - .../query_broker/controllers/errors.go | 4 - .../controllers/mutation_executor.go | 119 +- 
.../controllers/query_executor.go | 7 +- .../controllers/query_executor_test.go | 4 +- .../query_broker/controllers/server.go | 10 +- .../query_broker/controllers/server_test.go | 12 +- .../query_broker/query_broker_server.go | 3 +- .../query_broker/tracker/agents_info.go | 6 +- .../services/shared/agentpb/agent.pb.go | 159 +- .../services/shared/agentpb/agent.proto | 1 - vizier-chart/Chart.yaml | 4 - vizier-chart/helm-install.sh | 40 - vizier-chart/templates/00_secrets.yaml | 100 - vizier-chart/templates/01_nats.yaml | 246 - vizier-chart/templates/02_etcd.yaml | 238 - vizier-chart/templates/03_vizier_etcd.yaml | 2309 ----- .../templates/04_vizier_persistent.yaml | 2343 ----- vizier-chart/templates/05_vizier_etcd_ap.yaml | 2330 ----- .../templates/06_vizier_persistent_ap.yaml | 2364 ----- vizier-chart/values.yaml | 7 - 211 files changed, 7431 insertions(+), 37131 deletions(-) delete mode 100644 demos/log-generator/log-generator.yaml delete mode 100644 k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml delete mode 100644 src/carnot/funcs/builtins/pipeline_ops.cc delete mode 100644 src/carnot/funcs/builtins/pipeline_ops.h delete mode 100644 src/carnot/planner/file_source/BUILD.bazel delete mode 100644 src/carnot/planner/file_source/file_source.cc delete mode 100644 src/carnot/planner/file_source/file_source.h delete mode 100644 src/carnot/planner/file_source/file_source_test.cc delete mode 100644 src/carnot/planner/file_source/ir/BUILD.bazel delete mode 100755 src/carnot/planner/file_source/ir/logical.pb.go delete mode 100644 src/carnot/planner/file_source/ir/logical.proto delete mode 100644 src/carnot/planner/file_source/log_module.cc delete mode 100644 src/carnot/planner/file_source/log_module.h delete mode 100644 src/experimental/standalone_pem/file_source_manager.cc delete mode 100644 src/experimental/standalone_pem/file_source_manager.h delete mode 100644 src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml delete mode 100644 
src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl delete mode 100644 src/pxl_scripts/px/pipeline_flow_graph/vis.json delete mode 100644 src/stirling/source_connectors/file_source/BUILD.bazel delete mode 100644 src/stirling/source_connectors/file_source/file_source_connector.cc delete mode 100644 src/stirling/source_connectors/file_source/file_source_connector.h delete mode 100644 src/stirling/source_connectors/file_source/file_source_connector_test.cc delete mode 100644 src/stirling/source_connectors/file_source/stirling_fs_test.cc delete mode 100644 src/stirling/source_connectors/file_source/testdata/kern.log delete mode 100644 src/stirling/source_connectors/file_source/testdata/test.json delete mode 100644 src/stirling/source_connectors/file_source/testdata/unsupported.json delete mode 100644 src/stirling/source_connectors/stirling_error/sink_results_table.h delete mode 100644 src/stirling/source_connectors/stirling_error/stream_status_table.h delete mode 100644 src/stirling/source_connectors/stirling_error/testdata/test.json delete mode 100644 src/stirling/source_connectors/stirling_error/testdata/unsupported.json delete mode 100644 src/vizier/services/agent/pem/file_source_manager.cc delete mode 100644 src/vizier/services/agent/pem/file_source_manager.h delete mode 100644 src/vizier/services/metadata/controllers/file_source/BUILD.bazel delete mode 100644 src/vizier/services/metadata/controllers/file_source/file_source.go delete mode 100644 src/vizier/services/metadata/controllers/file_source/file_source_store.go delete mode 100644 src/vizier/services/metadata/controllers/file_source/file_source_store_test.go delete mode 100644 src/vizier/services/metadata/controllers/file_source/file_source_test.go delete mode 100644 src/vizier/services/metadata/controllers/file_source/mock.go delete mode 100644 src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel delete mode 100644 
src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go delete mode 100644 src/vizier/services/metadata/local/BUILD.bazel delete mode 100644 src/vizier/services/metadata/local/local_metadata_service.h delete mode 100644 vizier-chart/Chart.yaml delete mode 100644 vizier-chart/helm-install.sh delete mode 100644 vizier-chart/templates/00_secrets.yaml delete mode 100644 vizier-chart/templates/01_nats.yaml delete mode 100644 vizier-chart/templates/02_etcd.yaml delete mode 100644 vizier-chart/templates/03_vizier_etcd.yaml delete mode 100644 vizier-chart/templates/04_vizier_persistent.yaml delete mode 100644 vizier-chart/templates/05_vizier_etcd_ap.yaml delete mode 100644 vizier-chart/templates/06_vizier_persistent_ap.yaml delete mode 100644 vizier-chart/values.yaml diff --git a/.arclint b/.arclint index 15085b76eb4..7b87106fe80 100644 --- a/.arclint +++ b/.arclint @@ -23,7 +23,6 @@ "(^src/stirling/bpf_tools/bcc_bpf/system-headers)", "(^src/stirling/mysql/testing/.*\\.json$)", "(^src/stirling/obj_tools/testdata/go/test_go_binary.go)", - "(^src/stirling/source_connectors/file_source/testdata/test.json$)", "(^src/stirling/source_connectors/socket_tracer/protocols/http2/testing/go_grpc_client/main.go$)", "(^src/stirling/source_connectors/socket_tracer/protocols/http2/testing/go_grpc_server/main.go$)", "(^src/stirling/utils/testdata/config$)", diff --git a/BUILD.bazel b/BUILD.bazel index 874f7e13e5e..177a71158a6 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1,6 +1,3 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@px//bazel:pl_build_system.bzl", "pl_go_binary") - # Copyright 2018- The Pixie Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -59,7 +56,6 @@ gazelle( # gazelle:resolve go px.dev/pixie/src/carnot/docspb //src/carnot/docspb:docs_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/compilerpb //src/carnot/planner/compilerpb:compiler_status_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/distributedpb //src/carnot/planner/distributedpb:distributed_plan_pl_go_proto -# gazelle:resolve go px.dev/pixie/src/carnot/planner/file_source/ir //src/carnot/planner/file_source/ir:logical_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb //src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planner/plannerpb //src/carnot/planner/plannerpb:service_pl_go_proto # gazelle:resolve go px.dev/pixie/src/carnot/planpb //src/carnot/planpb:plan_pl_go_proto @@ -220,16 +216,3 @@ filegroup( srcs = ["go.sum"], visibility = ["//visibility:public"], ) - -go_library( - name = "pixie_lib", - srcs = ["gosym_tab_experiment.go"], - importpath = "px.dev/pixie", - visibility = ["//visibility:private"], -) - -pl_go_binary( - name = "pixie", - embed = [":pixie_lib"], - visibility = ["//visibility:public"], -) diff --git a/demos/log-generator/log-generator.yaml b/demos/log-generator/log-generator.yaml deleted file mode 100644 index ac05a56118b..00000000000 --- a/demos/log-generator/log-generator.yaml +++ /dev/null @@ -1,89 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: px-log-generator ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: vector-config - namespace: px-log-generator -data: - vector.toml: | - [sources.demo] - type = "demo_logs" - format = "json" - - [sinks.json_output] - type = "file" - inputs = ["demo"] - path = "/var/log/px-log-generator.json" - encoding.codec = "json" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: vector-logrotate-config - namespace: px-log-generator -data: - 
logrotate.conf: | - /var/log/px-log-generator.json { - size 30M - copytruncate - rotate 5 - compress - missingok - notifempty - } ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: vector - namespace: px-log-generator -spec: - selector: - matchLabels: - app: vector - template: - metadata: - labels: - app: vector - spec: - volumes: - - name: log-storage - hostPath: - path: /var/log - type: Directory - - name: logrotate-config - configMap: - name: vector-logrotate-config - - name: config-volume - configMap: - name: vector-config - initContainers: - - name: cleanup - image: busybox - command: ["/bin/sh", "-c", "truncate -s0 /var/log/px-log-generator.json"] - volumeMounts: - - name: log-storage - mountPath: /var/log - containers: - - name: vector - image: timberio/vector@sha256:f8933ff1a3ec08df45abc6130947938d98dc85792a25592ec1aa6fe83a7f562c # 0.44.0-debian - args: ["--config", "/etc/vector/vector.toml"] - volumeMounts: - - name: config-volume - mountPath: /etc/vector - - name: log-storage - mountPath: /var/log - - name: logrotate - image: vitess/logrotate@sha256:ba0f99827d0e2d0bda86230ff6666e75383d93babcbc6c803c4d41396214f312 # v21.0.2-bookworm - volumeMounts: - - name: logrotate-config - mountPath: /vt/logrotate.conf - subPath: logrotate.conf - - name: log-storage - mountPath: /var/log - terminationGracePeriodSeconds: 10 - restartPolicy: Always diff --git a/k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml b/k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml deleted file mode 100644 index fa57525b2c6..00000000000 --- a/k8s/cloud_deps/base/opensearch/operator/opensearch_operator.yaml +++ /dev/null @@ -1,8850 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: opensearch-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: 
opensearchactiongroups.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchActionGroup - listKind: OpensearchActionGroupList - plural: opensearchactiongroups - shortNames: - - opensearchactiongroup - singular: opensearchactiongroup - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchActionGroup is the Schema for the opensearchactiongroups - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: OpensearchActionGroupSpec defines the desired state of OpensearchActionGroup - properties: - allowedActions: - items: - type: string - type: array - description: - type: string - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: - type: string - required: - - allowedActions - - opensearchCluster - type: object - status: - description: OpensearchActionGroupStatus defines the observed state of - OpensearchActionGroup - properties: - existingActionGroup: - type: boolean - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. - type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchclusters.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpenSearchCluster - listKind: OpenSearchClusterList - plural: opensearchclusters - shortNames: - - os - - opensearch - singular: opensearchcluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.health - name: health - type: string - - description: Available nodes - jsonPath: .status.availableNodes - name: nodes - type: integer - - description: Opensearch version - jsonPath: .status.version - name: version - type: string - - jsonPath: .status.phase - name: phase - type: string - - jsonPath: .metadata.creationTimestamp - name: age - type: date - name: v1 - schema: - openAPIV3Schema: - description: Es is the Schema for the es API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ClusterSpec defines the desired state of OpenSearchCluster - properties: - bootstrap: - properties: - additionalConfig: - additionalProperties: - type: string - description: Extra items to add to the opensearch.yml, defaults - to General.AdditionalConfig - type: object - affinity: - description: Affinity is a group of affinity scheduling rules. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for - the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with - the corresponding weight. 
- properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the - corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). 
- properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. 
The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. 
- properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
- Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. 
- If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - type: object - jvm: - type: string - keystore: - items: - properties: - keyMappings: - additionalProperties: - type: string - description: Key mappings from secret to keystore keys - type: object - secret: - description: Secret containing key value pairs - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: array - nodeSelector: - additionalProperties: - type: string - type: object - pluginsList: - items: - type: string - type: array - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. 
It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - tolerations: - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. 
Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - type: object - confMgmt: - description: ConfMgmt defines which additional services will be deployed - properties: - VerUpdate: - type: boolean - autoScaler: - type: boolean - smartScaler: - type: boolean - type: object - dashboards: - properties: - additionalConfig: - additionalProperties: - type: string - description: Additional properties for opensearch_dashboards.yaml - type: object - additionalVolumes: - items: - properties: - configMap: - description: ConfigMap to use to populate the volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
- YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. 
Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: CSI object to use to populate the volume - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. 
- type: object - required: - - driver - type: object - emptyDir: - description: EmptyDir to use to populate the volume - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. - The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - name: - description: Name to use for the volume. Required. - type: string - path: - description: Path in the container to mount the volume at. - Required. - type: string - projected: - description: Projected object to use to populate the volume - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: |- - sources is the list of volume projections. 
Each entry in this list - handles one source. - items: - description: |- - Projection that may be projected along with other supported volume types. - Exactly one of these fields must be set. - properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. - - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the volume - root to write the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. - type: string - required: - - path - type: object - configMap: - description: configMap information about the configMap - data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. 
If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path - within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the - ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the - downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name, namespace and uid are supported.' - properties: - apiVersion: - description: Version of the schema - the FieldPath is written in terms - of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to - select in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file to - be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 - encoded. The first item of the relative - path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env - vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - secret: - description: secret information about the secret - data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path - within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional field specify whether - the Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information - about the serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. 
- type: string - required: - - path - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - restartPods: - description: Whether to restart the pods on content change - type: boolean - secret: - description: Secret to use populate the volume - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - optional: - description: optional field specify whether the Secret - or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - subPath: - description: SubPath of the referenced volume to mount. - type: string - required: - - name - - path - type: object - type: array - affinity: - description: Affinity is a group of affinity scheduling rules. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for - the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with - the corresponding weight. 
- properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the - corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). 
- properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. 
The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. 
- properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
- Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. 
- If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - basePath: - description: Base Path for Opensearch Clusters running behind - a reverse proxy - type: string - enable: - type: boolean - env: - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. 
- properties: - key: - description: The key to select. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image - type: string - imagePullSecrets: - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - labels: - additionalProperties: - type: string - type: object - nodeSelector: - additionalProperties: - type: string - type: object - opensearchCredentialsSecret: - description: Secret that contains fields username and password - for dashboards to use to login to opensearch, must only be supplied - if a custom securityconfig is provided - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - pluginsList: - items: - type: string - type: array - podSecurityContext: - description: Set security context for the dashboards pods - properties: - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. 
- Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. - type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. 
- Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxChangePolicy: - description: |- - seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. - It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. - Valid values are "MountOption" and "Recursive". - - "Recursive" means relabeling of all files on all Pod volumes by the container runtime. - This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. - - "MountOption" mounts all eligible Pod volumes with `-o context` mount option. - This requires all Pods that share the same volume to use the same SELinux label. - It is not possible to share the same volume among privileged and unprivileged Pods. - Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes - whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. - "MountOption" value is allowed only when SELinuxMount feature gate is enabled. - - If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. - If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes - and "Recursive" for all other volumes. - - This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. - - All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. 
- Note that this field cannot be set when spec.os.name is windows. - type: string - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. 
- type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in - addition to the container's primary GID and fsGroup (if specified). If - the SupplementalGroupsPolicy feature is enabled, the - supplementalGroupsPolicy field determines whether these are in addition - to or instead of any group memberships defined in the container image. - If unspecified, no additional groups are added, though group memberships - defined in the container image may still be used, depending on the - supplementalGroupsPolicy field. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - x-kubernetes-list-type: atomic - supplementalGroupsPolicy: - description: |- - Defines how supplemental groups of the first container processes are calculated. - Valid values are "Merge" and "Strict". If not specified, "Merge" is used. - (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled - and the container runtime must implement support for this feature. - Note that this field cannot be set when spec.os.name is windows. - type: string - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. - items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. 
- If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - replicas: - format: int32 - type: integer - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
- properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - securityContext: - description: Set security context for the dashboards pods' container - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. 
- AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. - type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. 
- type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. 
- If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. 
- If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - service: - properties: - labels: - additionalProperties: - type: string - type: object - loadBalancerSourceRanges: - items: - type: string - type: array - type: - default: ClusterIP - description: Service Type string describes ingress methods - for a service - enum: - - ClusterIP - - NodePort - - LoadBalancer - type: string - type: object - tls: - properties: - caSecret: - description: Optional, secret that contains the ca certificate - as ca.crt. If this and generate=true is set the existing - CA cert from that secret is used to generate the node certs. 
- In this case must contain ca.crt and ca.key fields - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - enable: - description: Enable HTTPS for Dashboards - type: boolean - generate: - description: Generate certificate, if false secret must be - provided - type: boolean - secret: - description: Optional, name of a TLS secret that contains - ca.crt, tls.key and tls.crt data. If ca.crt is in a different - secret provide it via the caSecret field - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: object - tolerations: - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. 
- Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - version: - type: string - required: - - replicas - - version - type: object - general: - description: |- - INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - Important: Run "make" to regenerate code after modifying this file - properties: - additionalConfig: - additionalProperties: - type: string - description: Extra items to add to the opensearch.yml - type: object - additionalVolumes: - description: Additional volumes to mount to all pods in the cluster - items: - properties: - configMap: - description: ConfigMap to use to populate the volume - properties: - defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the ConfigMap - or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - csi: - description: CSI object to use to populate the volume - properties: - driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. - type: string - fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. - type: string - nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). - type: boolean - volumeAttributes: - additionalProperties: - type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. 
- type: object - required: - - driver - type: object - emptyDir: - description: EmptyDir to use to populate the volume - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. - The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - name: - description: Name to use for the volume. Required. - type: string - path: - description: Path in the container to mount the volume at. - Required. - type: string - projected: - description: Projected object to use to populate the volume - properties: - defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - sources: - description: |- - sources is the list of volume projections. 
Each entry in this list - handles one source. - items: - description: |- - Projection that may be projected along with other supported volume types. - Exactly one of these fields must be set. - properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. - - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the volume - root to write the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. - type: string - required: - - path - type: object - configMap: - description: configMap information about the configMap - data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. 
If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path - within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional specify whether the - ConfigMap or its keys must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - downwardAPI: - description: downwardAPI information about the - downwardAPI data to project - properties: - items: - description: Items is a list of DownwardAPIVolume - file - items: - description: DownwardAPIVolumeFile represents - information to create the file containing - the pod field - properties: - fieldRef: - description: 'Required: Selects a field - of the pod: only annotations, labels, - name, namespace and uid are supported.' - properties: - apiVersion: - description: Version of the schema - the FieldPath is written in terms - of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to - select in the specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - path: - description: 'Required: Path is the - relative path name of the file to - be created. Must not be absolute or - contain the ''..'' path. Must be utf-8 - encoded. The first item of the relative - path must not start with ''..''' - type: string - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
- properties: - containerName: - description: 'Container name: required - for volumes, optional for env - vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output - format of the exposed resources, - defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - required: - - path - type: object - type: array - x-kubernetes-list-type: atomic - type: object - secret: - description: secret information about the secret - data to project - properties: - items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path - within a volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: optional field specify whether - the Secret or its key must be defined - type: boolean - type: object - x-kubernetes-map-type: atomic - serviceAccountToken: - description: serviceAccountToken is information - about the serviceAccountToken data to project - properties: - audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. - type: string - expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. - format: int64 - type: integer - path: - description: |- - path is the path relative to the mount point of the file to project the - token into. 
- type: string - required: - - path - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - restartPods: - description: Whether to restart the pods on content change - type: boolean - secret: - description: Secret to use populate the volume - properties: - defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. - format: int32 - type: integer - items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a - volume. - properties: - key: - description: key is the key to project. - type: string - mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. 
- format: int32 - type: integer - path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. - type: string - required: - - key - - path - type: object - type: array - x-kubernetes-list-type: atomic - optional: - description: optional field specify whether the Secret - or its keys must be defined - type: boolean - secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - type: string - type: object - subPath: - description: SubPath of the referenced volume to mount. - type: string - required: - - name - - path - type: object - type: array - annotations: - additionalProperties: - type: string - description: Adds support for annotations in services - type: object - command: - type: string - defaultRepo: - type: string - drainDataNodes: - description: Drain data nodes controls whether to drain data notes - on rolling restart operations - type: boolean - httpPort: - default: 9200 - format: int32 - type: integer - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image - type: string - imagePullSecrets: - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - keystore: - description: Populate opensearch keystore before startup - items: - properties: - keyMappings: - additionalProperties: - type: string - description: Key mappings from secret to keystore keys - type: object - secret: - description: Secret containing key value pairs - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: array - monitoring: - properties: - enable: - type: boolean - labels: - additionalProperties: - type: string - type: object - monitoringUserSecret: - type: string - pluginUrl: - type: string - scrapeInterval: - type: string - tlsConfig: - properties: - insecureSkipVerify: - type: boolean - serverName: - type: string - type: object - type: object - pluginsList: - items: - type: string - type: array - podSecurityContext: - description: Set security context for the cluster pods - properties: - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. 
- Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. - type: string - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. 
- If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxChangePolicy: - description: |- - seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. - It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. - Valid values are "MountOption" and "Recursive". - - "Recursive" means relabeling of all files on all Pod volumes by the container runtime. - This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. - - "MountOption" mounts all eligible Pod volumes with `-o context` mount option. - This requires all Pods that share the same volume to use the same SELinux label. - It is not possible to share the same volume among privileged and unprivileged Pods. - Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes - whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their - CSIDriver instance. Other volumes are always re-labelled recursively. - "MountOption" value is allowed only when SELinuxMount feature gate is enabled. - - If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. - If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes - and "Recursive" for all other volumes. 
- - This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. - - All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. - Note that this field cannot be set when spec.os.name is windows. - type: string - seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. 
- RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in - addition to the container's primary GID and fsGroup (if specified). If - the SupplementalGroupsPolicy feature is enabled, the - supplementalGroupsPolicy field determines whether these are in addition - to or instead of any group memberships defined in the container image. - If unspecified, no additional groups are added, though group memberships - defined in the container image may still be used, depending on the - supplementalGroupsPolicy field. - Note that this field cannot be set when spec.os.name is windows. - items: - format: int64 - type: integer - type: array - x-kubernetes-list-type: atomic - supplementalGroupsPolicy: - description: |- - Defines how supplemental groups of the first container processes are calculated. - Valid values are "Merge" and "Strict". If not specified, "Merge" is used. - (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled - and the container runtime must implement support for this feature. - Note that this field cannot be set when spec.os.name is windows. - type: string - sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. - items: - description: Sysctl defines a kernel parameter to be set - properties: - name: - description: Name of a property to set - type: string - value: - description: Value of a property to set - type: string - required: - - name - - value - type: object - type: array - x-kubernetes-list-type: atomic - windowsOptions: - description: |- - The Windows specific settings applied to all containers. 
- If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. - properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - securityContext: - description: Set security context for the cluster pods' container - properties: - allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. 
- type: boolean - appArmorProfile: - description: |- - appArmorProfile is the AppArmor options to use by this container. If set, this profile - overrides the pod's appArmorProfile. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile loaded on the node that should be used. - The profile must be preconfigured on the node to work. - Must match the loaded name of the profile. - Must be set if and only if type is "Localhost". - type: string - type: - description: |- - type indicates which kind of AppArmor profile will be applied. - Valid options are: - Localhost - a profile pre-loaded on the node. - RuntimeDefault - the container runtime's default profile. - Unconfined - no AppArmor enforcement. - type: string - required: - - type - type: object - capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. - properties: - add: - description: Added capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - drop: - description: Removed capabilities - items: - description: Capability represent POSIX capabilities - type - type: string - type: array - x-kubernetes-list-type: atomic - type: object - privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default value is Default which uses the container runtime defaults for - readonly paths and masked paths. 
- This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. - type: string - readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. - type: boolean - runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: boolean - runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. - format: int64 - type: integer - seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- Note that this field cannot be set when spec.os.name is windows. - properties: - level: - description: Level is SELinux level label that applies - to the container. - type: string - role: - description: Role is a SELinux role label that applies - to the container. - type: string - type: - description: Type is a SELinux type label that applies - to the container. - type: string - user: - description: User is a SELinux user label that applies - to the container. - type: string - type: object - seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. - properties: - localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. - type: string - type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. - type: string - required: - - type - type: object - windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. 
- properties: - gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the - GMSA credential spec to use. - type: string - hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. - type: boolean - runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - type: string - type: object - type: object - serviceAccount: - type: string - serviceName: - type: string - setVMMaxMapCount: - type: boolean - snapshotRepositories: - items: - properties: - name: - type: string - settings: - additionalProperties: - type: string - type: object - type: - type: string - required: - - name - - type - type: object - type: array - vendor: - enum: - - Opensearch - - Op - - OP - - os - - opensearch - type: string - version: - type: string - required: - - serviceName - type: object - initHelper: - properties: - image: - type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull - a container image - type: string - imagePullSecrets: - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. 
- properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - version: - type: string - type: object - nodePools: - items: - properties: - additionalConfig: - additionalProperties: - type: string - type: object - affinity: - description: Affinity is a group of affinity scheduling rules. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for - the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. - items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
- properties: - preference: - description: A node selector term, associated - with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. 
- This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - weight: - description: Weight associated with matching the - corresponding nodeSelectorTerm, in the range - 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. 
If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. - properties: - key: - description: The label key that the - selector applies to. - type: string - operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - type: object - x-kubernetes-map-type: atomic - type: array - x-kubernetes-list-type: atomic - required: - - nodeSelectorTerms - type: object - x-kubernetes-map-type: atomic - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some - other pod(s)). 
- properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. 
The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, etc. - as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred - node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. 
- properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
- Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list - of label selector requirements. The - requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key - that the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - x-kubernetes-list-type: atomic - requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. 
- If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. - items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running - properties: - labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both matchLabelKeys and labelSelector. - Also, matchLabelKeys cannot be set when labelSelector isn't set. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. - Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. 
- The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". - items: - type: string - type: array - x-kubernetes-list-type: atomic - topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - x-kubernetes-list-type: atomic - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - component: - type: string - diskSize: - type: string - env: - items: - description: EnvVar represents an environment variable present - in a Container. - properties: - name: - description: Name of the environment variable. Must be - a C_IDENTIFIER. - type: string - value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". - type: string - valueFrom: - description: Source for the environment variable's value. - Cannot be used if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of a ConfigMap. - properties: - key: - description: The key to select. 
- type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the ConfigMap or - its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - properties: - apiVersion: - description: Version of the schema the FieldPath - is written in terms of, defaults to "v1". - type: string - fieldPath: - description: Path of the field to select in the - specified API version. - type: string - required: - - fieldPath - type: object - x-kubernetes-map-type: atomic - resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
- properties: - containerName: - description: 'Container name: required for volumes, - optional for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the output format of the - exposed resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource to select' - type: string - required: - - resource - type: object - x-kubernetes-map-type: atomic - secretKeyRef: - description: Selects a key of a secret in the pod's - namespace - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its - key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - type: object - required: - - name - type: object - type: array - jvm: - type: string - labels: - additionalProperties: - type: string - type: object - nodeSelector: - additionalProperties: - type: string - type: object - pdb: - properties: - enable: - type: boolean - maxUnavailable: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - minAvailable: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - type: object - persistence: - description: PersistencConfig defines options for data persistence - properties: - emptyDir: - description: |- - Represents an empty directory for a pod. 
- Empty directory volumes support ownership management and SELinux relabeling. - properties: - medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. - The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - hostPath: - description: |- - Represents a host path mapped into a pod. - Host path volumes do not support ownership management or SELinux relabeling. - properties: - path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. 
- More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - type: string - required: - - path - type: object - pvc: - properties: - accessModes: - items: - type: string - type: array - storageClass: - type: string - type: object - type: object - priorityClassName: - type: string - probes: - properties: - liveness: - properties: - failureThreshold: - format: int32 - type: integer - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - readiness: - properties: - failureThreshold: - format: int32 - type: integer - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - startup: - properties: - failureThreshold: - format: int32 - type: integer - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - timeoutSeconds: - format: int32 - type: integer - type: object - type: object - replicas: - format: int32 - type: integer - resources: - description: ResourceRequirements describes the compute resource - requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
- properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - roles: - items: - type: string - type: array - tolerations: - items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . - properties: - effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. 
- When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - topologySpreadConstraints: - items: - description: TopologySpreadConstraint specifies how to spread - matching pods among the given topology. - properties: - labelSelector: - description: |- - LabelSelector is used to find matching pods. - Pods that match this label selector are counted to determine the number of pods - in their corresponding topology domain. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select the pods over which - spreading will be calculated. The keys are used to lookup values from the - incoming pod labels, those key-value labels are ANDed with labelSelector - to select the group of existing pods over which spreading will be calculated - for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - MatchLabelKeys cannot be set when LabelSelector isn't set. - Keys that don't exist in the incoming pod labels will - be ignored. A null or empty list means only match against labelSelector. - - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). - items: - type: string - type: array - x-kubernetes-list-type: atomic - maxSkew: - description: |- - MaxSkew describes the degree to which pods may be unevenly distributed. 
- When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - between the number of matching pods in the target topology and the global minimum. - The global minimum is the minimum number of matching pods in an eligible domain - or zero if the number of eligible domains is less than MinDomains. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 2/2/1: - In this case, the global minimum is 1. - | zone1 | zone2 | zone3 | - | P P | P P | P | - - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - violate MaxSkew(1). - - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - to topologies that satisfy it. - It's a required field. Default value is 1 and 0 is not allowed. - format: int32 - type: integer - minDomains: - description: |- - MinDomains indicates a minimum number of eligible domains. - When the number of eligible domains with matching topology keys is less than minDomains, - Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - And when the number of eligible domains with matching topology keys equals or greater than minDomains, - this value has no effect on scheduling. - As a result, when the number of eligible domains is less than minDomains, - scheduler won't schedule more than maxSkew Pods to those domains. - If value is nil, the constraint behaves as if MinDomains is equal to 1. - Valid values are integers greater than 0. - When value is not nil, WhenUnsatisfiable must be DoNotSchedule. 
- - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - labelSelector spread as 2/2/2: - | zone1 | zone2 | zone3 | - | P P | P P | P P | - The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - In this situation, new pod with the same labelSelector cannot be scheduled, - because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - it will violate MaxSkew. - format: int32 - type: integer - nodeAffinityPolicy: - description: |- - NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - when calculating pod topology spread skew. Options are: - - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - - If this value is nil, the behavior is equivalent to the Honor policy. - type: string - nodeTaintsPolicy: - description: |- - NodeTaintsPolicy indicates how we will treat node taints when calculating - pod topology spread skew. Options are: - - Honor: nodes without taints, along with tainted nodes for which the incoming pod - has a toleration, are included. - - Ignore: node taints are ignored. All nodes are included. - - If this value is nil, the behavior is equivalent to the Ignore policy. - type: string - topologyKey: - description: |- - TopologyKey is the key of node labels. Nodes that have a label with this key - and identical values are considered to be in the same topology. - We consider each as a "bucket", and try to put balanced number - of pods into each bucket. - We define a domain as a particular instance of a topology. - Also, we define an eligible domain as a domain whose nodes meet the requirements of - nodeAffinityPolicy and nodeTaintsPolicy. - e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. 
- And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - It's a required field. - type: string - whenUnsatisfiable: - description: |- - WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - the spread constraint. - - DoNotSchedule (default) tells the scheduler not to schedule it. - - ScheduleAnyway tells the scheduler to schedule the pod in any location, - but giving higher precedence to topologies that would help reduce the - skew. - A constraint is considered "Unsatisfiable" for an incoming pod - if and only if every possible node assignment for that pod would violate - "MaxSkew" on some topology. - For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - labelSelector spread as 3/1/1: - | zone1 | zone2 | zone3 | - | P P P | P | P | - If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - won't make it *more* imbalanced. - It's a required field. - type: string - required: - - maxSkew - - topologyKey - - whenUnsatisfiable - type: object - type: array - required: - - component - - replicas - - roles - type: object - type: array - security: - description: Security defines options for managing the opensearch-security - plugin - properties: - config: - properties: - adminCredentialsSecret: - description: Secret that contains fields username and password - to be used by the operator to access the opensearch cluster - for node draining. Must be set if custom securityconfig - is provided. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - adminSecret: - description: TLS Secret that contains a client certificate - (tls.key, tls.crt, ca.crt) with admin rights in the opensearch - cluster. Must be set if transport certificates are provided - by user and not generated - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - securityConfigSecret: - description: Secret that contains the differnt yml files of - the opensearch-security config (config.yml, internal_users.yml, - ...) - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - updateJob: - description: Specific configs for the SecurityConfig update - job - properties: - resources: - description: ResourceRequirements describes the compute - resource requirements. - properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry - in PodSpec.ResourceClaims. 
- properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - request: - description: |- - Request is the name chosen for a request in the referenced claim. - If empty, everything from the claim is made available, otherwise - only the result of this request. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - type: object - type: object - type: object - type: object - tls: - description: Configure tls usage for transport and http interface - properties: - http: - properties: - caSecret: - description: Optional, secret that contains the ca certificate - as ca.crt. If this and generate=true is set the existing - CA cert from that secret is used to generate the node - certs. 
In this case must contain ca.crt and ca.key fields - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - generate: - description: If set to true the operator will generate - a CA and certificates for the cluster to use, if false - secrets with existing certificates must be supplied - type: boolean - secret: - description: Optional, name of a TLS secret that contains - ca.crt, tls.key and tls.crt data. If ca.crt is in a - different secret provide it via the caSecret field - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: object - transport: - properties: - adminDn: - description: DNs of certificates that should have admin - access, mainly used for securityconfig updates via securityadmin.sh, - only used when existing certificates are provided - items: - type: string - type: array - caSecret: - description: Optional, secret that contains the ca certificate - as ca.crt. If this and generate=true is set the existing - CA cert from that secret is used to generate the node - certs. In this case must contain ca.crt and ca.key fields - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. 
Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - generate: - description: If set to true the operator will generate - a CA and certificates for the cluster to use, if false - secrets with existing certificates must be supplied - type: boolean - nodesDn: - description: Allowed Certificate DNs for nodes, only used - when existing certificates are provided - items: - type: string - type: array - perNode: - description: Configure transport node certificate - type: boolean - secret: - description: Optional, name of a TLS secret that contains - ca.crt, tls.key and tls.crt data. If ca.crt is in a - different secret provide it via the caSecret field - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: object - type: object - type: object - required: - - nodePools - type: object - status: - description: ClusterStatus defines the observed state of Es - properties: - availableNodes: - description: AvailableNodes is the number of available instances. - format: int32 - type: integer - componentsStatus: - items: - properties: - component: - type: string - conditions: - items: - type: string - type: array - description: - type: string - status: - type: string - type: object - type: array - health: - description: OpenSearchHealth is the health of the cluster as returned - by the health API. 
- type: string - initialized: - type: boolean - phase: - description: |- - INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - Important: Run "make" to regenerate code after modifying this file - type: string - version: - type: string - required: - - componentsStatus - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchcomponenttemplates.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchComponentTemplate - listKind: OpensearchComponentTemplateList - plural: opensearchcomponenttemplates - shortNames: - - opensearchcomponenttemplate - singular: opensearchcomponenttemplate - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchComponentTemplate is the schema for the OpenSearch - component templates API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - properties: - _meta: - description: Optional user metadata about the component template - x-kubernetes-preserve-unknown-fields: true - allowAutoCreate: - description: If true, then indices can be automatically created using - this template - type: boolean - name: - description: The name of the component template. Defaults to metadata.name - type: string - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - template: - description: The template that should be applied - properties: - aliases: - additionalProperties: - description: Describes the specs of an index alias - properties: - alias: - description: The name of the alias. - type: string - filter: - description: Query used to limit documents the alias can - access. - x-kubernetes-preserve-unknown-fields: true - index: - description: The name of the index that the alias points - to. - type: string - isWriteIndex: - description: If true, the index is the write index for the - alias - type: boolean - routing: - description: Value used to route indexing and search operations - to a specific shard. 
- type: string - type: object - description: Aliases to add - type: object - mappings: - description: Mapping for fields in the index - x-kubernetes-preserve-unknown-fields: true - settings: - description: Configuration options for the index - x-kubernetes-preserve-unknown-fields: true - type: object - version: - description: Version number used to manage the component template - externally - type: integer - required: - - opensearchCluster - - template - type: object - status: - properties: - componentTemplateName: - description: Name of the currently managed component template - type: string - existingComponentTemplate: - type: boolean - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. - type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchindextemplates.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchIndexTemplate - listKind: OpensearchIndexTemplateList - plural: opensearchindextemplates - shortNames: - - opensearchindextemplate - singular: opensearchindextemplate - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchIndexTemplate is the schema for the OpenSearch index - templates API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - properties: - _meta: - description: Optional user metadata about the index template - x-kubernetes-preserve-unknown-fields: true - composedOf: - description: |- - An ordered list of component template names. Component templates are merged in the order specified, - meaning that the last component template specified has the highest precedence - items: - type: string - type: array - dataStream: - description: The dataStream config that should be applied - properties: - timestamp_field: - description: TimestampField for dataStream - properties: - name: - description: Name of the field that are used for the DataStream - type: string - required: - - name - type: object - type: object - indexPatterns: - description: Array of wildcard expressions used to match the names - of indices during creation - items: - type: string - type: array - name: - description: The name of the index template. Defaults to metadata.name - type: string - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - priority: - description: |- - Priority to determine index template precedence when a new data stream or index is created. - The index template with the highest priority is chosen - type: integer - template: - description: The template that should be applied - properties: - aliases: - additionalProperties: - description: Describes the specs of an index alias - properties: - alias: - description: The name of the alias. - type: string - filter: - description: Query used to limit documents the alias can - access. - x-kubernetes-preserve-unknown-fields: true - index: - description: The name of the index that the alias points - to. - type: string - isWriteIndex: - description: If true, the index is the write index for the - alias - type: boolean - routing: - description: Value used to route indexing and search operations - to a specific shard. - type: string - type: object - description: Aliases to add - type: object - mappings: - description: Mapping for fields in the index - x-kubernetes-preserve-unknown-fields: true - settings: - description: Configuration options for the index - x-kubernetes-preserve-unknown-fields: true - type: object - version: - description: Version number used to manage the component template - externally - type: integer - required: - - indexPatterns - - opensearchCluster - type: object - status: - properties: - existingIndexTemplate: - type: boolean - indexTemplateName: - description: Name of the currently managed index template - type: string - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. 
- type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchismpolicies.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpenSearchISMPolicy - listKind: OpenSearchISMPolicyList - plural: opensearchismpolicies - shortNames: - - ismp - - ismpolicy - singular: opensearchismpolicy - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ISMPolicySpec is the specification for the ISM policy for - OS. - properties: - applyToExistingIndices: - description: If true, apply the policy to existing indices that match - the index patterns in the ISM template. - type: boolean - defaultState: - description: The default starting state for each index that uses this - policy. - type: string - description: - description: A human-readable description of the policy. - type: string - errorNotification: - properties: - channel: - type: string - destination: - description: The destination URL. 
- properties: - amazon: - properties: - url: - type: string - type: object - chime: - properties: - url: - type: string - type: object - customWebhook: - properties: - url: - type: string - type: object - slack: - properties: - url: - type: string - type: object - type: object - messageTemplate: - description: The text of the message - properties: - source: - type: string - type: object - type: object - ismTemplate: - description: Specify an ISM template pattern that matches the index - to apply the policy. - properties: - indexPatterns: - description: Index patterns on which this policy has to be applied - items: - type: string - type: array - priority: - description: Priority of the template, defaults to 0 - type: integer - required: - - indexPatterns - type: object - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - policyId: - type: string - states: - description: The states that you define in the policy. - items: - properties: - actions: - description: The actions to execute after entering a state. - items: - description: Actions are the steps that the policy sequentially - executes on entering a specific state. - properties: - alias: - properties: - actions: - description: Allocate the index to a node with a specified - attribute. - items: - properties: - add: - properties: - aliases: - description: The name of the alias. - items: - type: string - type: array - index: - description: The name of the index that - the alias points to. 
- type: string - isWriteIndex: - description: Specify the index that accepts - any write operations to the alias. - type: boolean - routing: - description: Limit search to an associated - shard value - type: string - type: object - remove: - properties: - aliases: - description: The name of the alias. - items: - type: string - type: array - index: - description: The name of the index that - the alias points to. - type: string - isWriteIndex: - description: Specify the index that accepts - any write operations to the alias. - type: boolean - routing: - description: Limit search to an associated - shard value - type: string - type: object - type: object - type: array - required: - - actions - type: object - allocation: - description: Allocate the index to a node with a specific - attribute set - properties: - exclude: - description: Allocate the index to a node with a specified - attribute. - type: string - include: - description: Allocate the index to a node with any - of the specified attributes. - type: string - require: - description: Don’t allocate the index to a node with - any of the specified attributes. - type: string - waitFor: - description: Wait for the policy to execute before - allocating the index to a node with a specified - attribute. - type: string - required: - - exclude - - include - - require - - waitFor - type: object - close: - description: Closes the managed index. - type: object - delete: - description: Deletes a managed index. - type: object - forceMerge: - description: Reduces the number of Lucene segments by - merging the segments of individual shards. - properties: - maxNumSegments: - description: The number of segments to reduce the - shard to. - format: int64 - type: integer - required: - - maxNumSegments - type: object - indexPriority: - description: Set the priority for the index in a specific - state. - properties: - priority: - description: The priority for the index as soon as - it enters a state. 
- format: int64 - type: integer - required: - - priority - type: object - notification: - description: Name string `json:"name,omitempty"` - properties: - destination: - type: string - messageTemplate: - properties: - source: - type: string - type: object - required: - - destination - - messageTemplate - type: object - open: - description: Opens a managed index. - type: object - readOnly: - description: Sets a managed index to be read only. - type: object - readWrite: - description: Sets a managed index to be writeable. - type: object - replicaCount: - description: Sets the number of replicas to assign to - an index. - properties: - numberOfReplicas: - format: int64 - type: integer - required: - - numberOfReplicas - type: object - retry: - description: The retry configuration for the action. - properties: - backoff: - description: The backoff policy type to use when retrying. - type: string - count: - description: The number of retry counts. - format: int64 - type: integer - delay: - description: The time to wait between retries. - type: string - required: - - count - type: object - rollover: - description: Rolls an alias over to a new index when the - managed index meets one of the rollover conditions. - properties: - minDocCount: - description: The minimum number of documents required - to roll over the index. - format: int64 - type: integer - minIndexAge: - description: The minimum age required to roll over - the index. - type: string - minPrimaryShardSize: - description: The minimum storage size of a single - primary shard required to roll over the index. - type: string - minSize: - description: The minimum size of the total primary - shard storage (not counting replicas) required to - roll over the index. - type: string - type: object - rollup: - description: Periodically reduce data granularity by rolling - up old data into summarized indexes. 
- type: object - shrink: - description: Allows you to reduce the number of primary - shards in your indexes - properties: - forceUnsafe: - description: If true, executes the shrink action even - if there are no replicas. - type: boolean - maxShardSize: - description: The maximum size in bytes of a shard - for the target index. - type: string - numNewShards: - description: The maximum number of primary shards - in the shrunken index. - type: integer - percentageOfSourceShards: - description: Percentage of the number of original - primary shards to shrink. - format: int64 - type: integer - targetIndexNameTemplate: - description: The name of the shrunken index. - type: string - type: object - snapshot: - description: Back up your cluster’s indexes and state - properties: - repository: - description: The repository name that you register - through the native snapshot API operations. - type: string - snapshot: - description: The name of the snapshot. - type: string - required: - - repository - - snapshot - type: object - timeout: - description: The timeout period for the action. Accepts - time units for minutes, hours, and days. - type: string - type: object - type: array - name: - description: The name of the state. - type: string - transitions: - description: The next states and the conditions required to - transition to those states. If no transitions exist, the policy - assumes that it’s complete and can now stop managing the index - items: - properties: - conditions: - description: conditions for the transition. - properties: - cron: - description: The cron job that triggers the transition - if no other transition happens first. - properties: - cron: - description: A wrapper for the cron job that triggers - the transition if no other transition happens - first. This wrapper is here to adhere to the - OpenSearch API. - properties: - expression: - description: The cron expression that triggers - the transition. 
- type: string - timezone: - description: The timezone that triggers the - transition. - type: string - required: - - expression - - timezone - type: object - required: - - cron - type: object - minDocCount: - description: The minimum document count of the index - required to transition. - format: int64 - type: integer - minIndexAge: - description: The minimum age of the index required - to transition. - type: string - minRolloverAge: - description: The minimum age required after a rollover - has occurred to transition to the next state. - type: string - minSize: - description: The minimum size of the total primary - shard storage (not counting replicas) required to - transition. - type: string - type: object - stateName: - description: The name of the state to transition to if - the conditions are met. - type: string - required: - - conditions - - stateName - type: object - type: array - required: - - actions - - name - type: object - type: array - required: - - defaultState - - description - - states - type: object - status: - description: OpensearchISMPolicyStatus defines the observed state of OpensearchISMPolicy - properties: - existingISMPolicy: - type: boolean - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. 
- type: string - policyId: - type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchroles.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchRole - listKind: OpensearchRoleList - plural: opensearchroles - shortNames: - - opensearchrole - singular: opensearchrole - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchRole is the Schema for the opensearchroles API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: OpensearchRoleSpec defines the desired state of OpensearchRole - properties: - clusterPermissions: - items: - type: string - type: array - indexPermissions: - items: - properties: - allowedActions: - items: - type: string - type: array - dls: - type: string - fls: - items: - type: string - type: array - indexPatterns: - items: - type: string - type: array - maskedFields: - items: - type: string - type: array - type: object - type: array - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - tenantPermissions: - items: - properties: - allowedActions: - items: - type: string - type: array - tenantPatterns: - items: - type: string - type: array - type: object - type: array - required: - - opensearchCluster - type: object - status: - description: OpensearchRoleStatus defines the observed state of OpensearchRole - properties: - existingRole: - type: boolean - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. 
- type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchsnapshotpolicies.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchSnapshotPolicy - listKind: OpensearchSnapshotPolicyList - plural: opensearchsnapshotpolicies - singular: opensearchsnapshotpolicy - scope: Namespaced - versions: - - additionalPrinterColumns: - - description: Existing policy state - jsonPath: .status.existingSnapshotPolicy - name: existingpolicy - type: boolean - - description: Snapshot policy name - jsonPath: .status.snapshotPolicyName - name: policyName - type: string - - jsonPath: .status.state - name: state - type: string - - jsonPath: .metadata.creationTimestamp - name: age - type: date - name: v1 - schema: - openAPIV3Schema: - description: OpensearchSnapshotPolicy is the Schema for the opensearchsnapshotpolicies - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - properties: - creation: - properties: - schedule: - properties: - cron: - properties: - expression: - type: string - timezone: - type: string - required: - - expression - - timezone - type: object - required: - - cron - type: object - timeLimit: - type: string - required: - - schedule - type: object - deletion: - properties: - deleteCondition: - properties: - maxAge: - type: string - maxCount: - type: integer - minCount: - type: integer - type: object - schedule: - properties: - cron: - properties: - expression: - type: string - timezone: - type: string - required: - - expression - - timezone - type: object - required: - - cron - type: object - timeLimit: - type: string - type: object - description: - type: string - enabled: - type: boolean - notification: - properties: - channel: - properties: - id: - type: string - required: - - id - type: object - conditions: - properties: - creation: - type: boolean - deletion: - type: boolean - failure: - type: boolean - type: object - required: - - channel - type: object - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - policyName: - type: string - snapshotConfig: - properties: - dateFormat: - type: string - dateFormatTimezone: - type: string - ignoreUnavailable: - type: boolean - includeGlobalState: - type: boolean - indices: - type: string - metadata: - additionalProperties: - type: string - type: object - partial: - type: boolean - repository: - type: string - required: - - repository - type: object - required: - - creation - - opensearchCluster - - policyName - - snapshotConfig - type: object - status: - description: OpensearchSnapshotPolicyStatus defines the observed state - of OpensearchSnapshotPolicy - properties: - existingSnapshotPolicy: - type: boolean - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. - type: string - reason: - type: string - snapshotPolicyName: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchtenants.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchTenant - listKind: OpensearchTenantList - plural: opensearchtenants - shortNames: - - opensearchtenant - singular: opensearchtenant - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchTenant is the Schema for the opensearchtenants API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. 
- Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: OpensearchTenantSpec defines the desired state of OpensearchTenant - properties: - description: - type: string - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - required: - - opensearchCluster - type: object - status: - description: OpensearchTenantStatus defines the observed state of OpensearchTenant - properties: - existingTenant: - type: boolean - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. 
- type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchuserrolebindings.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchUserRoleBinding - listKind: OpensearchUserRoleBindingList - plural: opensearchuserrolebindings - shortNames: - - opensearchuserrolebinding - singular: opensearchuserrolebinding - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchUserRoleBinding is the Schema for the opensearchuserrolebindings - API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: OpensearchUserRoleBindingSpec defines the desired state of - OpensearchUserRoleBinding - properties: - backendRoles: - items: - type: string - type: array - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. 
- This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - roles: - items: - type: string - type: array - users: - items: - type: string - type: array - required: - - opensearchCluster - - roles - type: object - status: - description: OpensearchUserRoleBindingStatus defines the observed state - of OpensearchUserRoleBinding - properties: - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. - type: string - provisionedBackendRoles: - items: - type: string - type: array - provisionedRoles: - items: - type: string - type: array - provisionedUsers: - items: - type: string - type: array - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.0 - name: opensearchusers.opensearch.opster.io -spec: - group: opensearch.opster.io - names: - kind: OpensearchUser - listKind: OpensearchUserList - plural: opensearchusers - shortNames: - - opensearchuser - singular: opensearchuser - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: OpensearchUser is the Schema for the opensearchusers API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: OpensearchUserSpec defines the desired state of OpensearchUser - properties: - attributes: - additionalProperties: - type: string - type: object - backendRoles: - items: - type: string - type: array - opendistroSecurityRoles: - items: - type: string - type: array - opensearchCluster: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - passwordFrom: - description: SecretKeySelector selects a key of a Secret. - properties: - key: - description: The key of the secret to select from. Must be a - valid secret key. - type: string - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - optional: - description: Specify whether the Secret or its key must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - required: - - opensearchCluster - - passwordFrom - type: object - status: - description: OpensearchUserStatus defines the observed state of OpensearchUser - properties: - managedCluster: - description: |- - UID is a type that holds unique ID values, including UUIDs. Because we - don't ONLY use UUIDs, this is an alias to string. Being a type captures - intent and helps make sure that UIDs and names do not get conflated. - type: string - reason: - type: string - state: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.6.2 - name: servicemonitors.monitoring.coreos.com -spec: - conversion: - strategy: None - group: monitoring.coreos.com - names: - categories: - - prometheus-operator - kind: ServiceMonitor - listKind: ServiceMonitorList - plural: servicemonitors - singular: servicemonitor - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: ServiceMonitor defines monitoring for a set of services. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of desired Service selection for target discovery - by Prometheus. - properties: - endpoints: - description: A list of endpoints allowed as part of this ServiceMonitor. - items: - description: Endpoint defines a scrapeable endpoint serving Prometheus - metrics. - properties: - authorization: - description: Authorization section for this endpoint - properties: - credentials: - description: The secret's key that contains the credentials - of the request - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: - description: Set the authentication type. Defaults to Bearer, - Basic will cause an error - type: string - type: object - basicAuth: - description: 'BasicAuth allow an endpoint to authenticate over - basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' - properties: - password: - description: The secret in the service monitor namespace - that contains the password for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - username: - description: The secret in the service monitor namespace - that contains the username for authentication. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: object - bearerTokenFile: - description: File to read bearer token for scraping targets. - type: string - bearerTokenSecret: - description: Secret to mount to read bearer token for scraping - targets. The secret needs to be in the same namespace as the - service monitor and accessible by the Prometheus Operator. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - followRedirects: - description: FollowRedirects configures whether scrape requests - follow HTTP 3xx redirects. - type: boolean - honorLabels: - description: HonorLabels chooses the metric's labels on collisions - with target labels. - type: boolean - honorTimestamps: - description: HonorTimestamps controls whether Prometheus respects - the timestamps present in scraped data. 
- type: boolean - interval: - description: Interval at which metrics should be scraped - type: string - metricRelabelings: - description: MetricRelabelConfigs to apply to samples before - ingestion. - items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' - properties: - action: - default: replace - description: Action to perform based on regex matching. - Default is 'replace' - enum: - - replace - - keep - - drop - - hashmod - - labelmap - - labeldrop - - labelkeep - type: string - modulus: - description: Modulus to take of the hash of the source - label values. - format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. - items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. - type: string - type: object - type: array - oauth2: - description: OAuth2 for the URL. 
Only valid in Prometheus versions - 2.27.0 and newer. - properties: - clientId: - description: The secret or configmap containing the OAuth2 - client id - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - clientSecret: - description: The secret containing the OAuth2 client secret - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - endpointParams: - additionalProperties: - type: string - description: Parameters to append to the token URL - type: object - scopes: - description: OAuth2 scopes used for the token request - items: - type: string - type: array - tokenUrl: - description: The URL to fetch the token from - minLength: 1 - type: string - required: - - clientId - - clientSecret - - tokenUrl - type: object - params: - additionalProperties: - items: - type: string - type: array - description: Optional HTTP URL parameters - type: object - path: - description: HTTP path to scrape for metrics. - type: string - port: - description: Name of the service port this endpoint refers to. - Mutually exclusive with targetPort. - type: string - proxyUrl: - description: ProxyURL eg http://proxyserver:2195 Directs scrapes - to proxy through this endpoint. - type: string - relabelings: - description: 'RelabelConfigs to apply to samples before scraping. - Prometheus Operator automatically adds relabelings for a few - standard Kubernetes fields. The original scrape job''s name - is available via the `__tmp_prometheus_job_name` label. More - info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' - items: - description: 'RelabelConfig allows dynamic rewriting of the - label set, being applied to samples before ingestion. It - defines ``-section of Prometheus - configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' - properties: - action: - default: replace - description: Action to perform based on regex matching. - Default is 'replace' - enum: - - replace - - keep - - drop - - hashmod - - labelmap - - labeldrop - - labelkeep - type: string - modulus: - description: Modulus to take of the hash of the source - label values. 
- format: int64 - type: integer - regex: - description: Regular expression against which the extracted - value is matched. Default is '(.*)' - type: string - replacement: - description: Replacement value against which a regex replace - is performed if the regular expression matches. Regex - capture groups are available. Default is '$1' - type: string - separator: - description: Separator placed between concatenated source - label values. default is ';'. - type: string - sourceLabels: - description: The source labels select values from existing - labels. Their content is concatenated using the configured - separator and matched against the configured regular - expression for the replace, keep, and drop actions. - items: - description: LabelName is a valid Prometheus label name - which may only contain ASCII letters, numbers, as - well as underscores. - pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ - type: string - type: array - targetLabel: - description: Label to which the resulting value is written - in a replace action. It is mandatory for replace actions. - Regex capture groups are available. - type: string - type: object - type: array - scheme: - description: HTTP scheme to use for scraping. - type: string - scrapeTimeout: - description: Timeout after which the scrape is ended - type: string - targetPort: - anyOf: - - type: integer - - type: string - description: Name or number of the target port of the Pod behind - the Service, the port must be specified with container port - property. Mutually exclusive with port. - x-kubernetes-int-or-string: true - tlsConfig: - description: TLS configuration to use when scraping the endpoint - properties: - ca: - description: Struct containing the CA cert to use for the - targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - caFile: - description: Path to the CA cert in the Prometheus container - to use for the targets. - type: string - cert: - description: Struct containing the client cert file for - the targets. - properties: - configMap: - description: ConfigMap containing data to use for the - targets. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' - type: string - optional: - description: Specify whether the ConfigMap or its - key must be defined - type: boolean - required: - - key - type: object - secret: - description: Secret containing data to use for the targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, - uid?' 
- type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - type: object - certFile: - description: Path to the client cert file in the Prometheus - container for the targets. - type: string - insecureSkipVerify: - description: Disable target certificate validation. - type: boolean - keyFile: - description: Path to the client key file in the Prometheus - container for the targets. - type: string - keySecret: - description: Secret containing the client key file for the - targets. - properties: - key: - description: The key of the secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - required: - - key - type: object - serverName: - description: Used to verify the hostname for the targets. - type: string - type: object - type: object - type: array - jobLabel: - description: "Chooses the label of the Kubernetes `Endpoints`. Its - value will be used for the `job`-label's value of the created metrics. - \n Default & fallback value: the name of the respective Kubernetes - `Endpoint`." - type: string - labelLimit: - description: Per-scrape limit on number of labels that will be accepted - for a sample. Only valid in Prometheus versions 2.27.0 and newer. - format: int64 - type: integer - labelNameLengthLimit: - description: Per-scrape limit on length of labels name that will be - accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. - format: int64 - type: integer - labelValueLengthLimit: - description: Per-scrape limit on length of labels value that will - be accepted for a sample. Only valid in Prometheus versions 2.27.0 - and newer. 
- format: int64 - type: integer - namespaceSelector: - description: Selector to select which namespaces the Kubernetes Endpoints - objects are discovered from. - properties: - any: - description: Boolean describing whether all namespaces are selected - in contrast to a list restricting them. - type: boolean - matchNames: - description: List of namespace names to select from. - items: - type: string - type: array - type: object - podTargetLabels: - description: PodTargetLabels transfers labels on the Kubernetes `Pod` - onto the created metrics. - items: - type: string - type: array - sampleLimit: - description: SampleLimit defines per-scrape limit on number of scraped - samples that will be accepted. - format: int64 - type: integer - selector: - description: Selector to select Endpoints objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. - The requirements are ANDed. - items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. - properties: - key: - description: key is the label key that the selector applies - to. - type: string - operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. - type: string - values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. - type: object - type: object - targetLabels: - description: TargetLabels transfers labels from the Kubernetes `Service` - onto the created metrics. - items: - type: string - type: array - targetLimit: - description: TargetLimit defines a limit on the number of scraped - targets that will be accepted. - format: int64 - type: integer - required: - - endpoints - - selector - type: object - required: - - spec - type: object - served: true - storage: true ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: opensearch-operator-controller-manager - namespace: opensearch-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: opensearch-operator-leader-election-role - namespace: opensearch-operator-system -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: opensearch-operator-manager-role -rules: -- apiGroups: - - apps - resources: - - deployments - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - configmaps - - namespaces - - persistentvolumeclaims - - pods - - secrets - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - 
- patch - - update -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - opensearch.opster.io - resources: - - events - verbs: - - create - - patch -- apiGroups: - - opensearch.opster.io - resources: - - opensearchactiongroups - - opensearchclusters - - opensearchcomponenttemplates - - opensearchindextemplates - - opensearchismpolicies - - opensearchroles - - opensearchsnapshotpolicies - - opensearchtenants - - opensearchuserrolebindings - - opensearchusers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - opensearch.opster.io - resources: - - opensearchactiongroups/finalizers - - opensearchclusters/finalizers - - opensearchcomponenttemplates/finalizers - - opensearchindextemplates/finalizers - - opensearchismpolicies/finalizers - - opensearchroles/finalizers - - opensearchsnapshotpolicies/finalizers - - opensearchtenants/finalizers - - opensearchuserrolebindings/finalizers - - opensearchusers/finalizers - verbs: - - update -- apiGroups: - - opensearch.opster.io - resources: - - opensearchactiongroups/status - - opensearchclusters/status - - opensearchcomponenttemplates/status - - opensearchindextemplates/status - - opensearchismpolicies/status - - opensearchroles/status - - opensearchsnapshotpolicies/status - - opensearchtenants/status - - opensearchuserrolebindings/status - - opensearchusers/status - verbs: - - get - - patch - - update -- apiGroups: - - policy - resources: - - poddisruptionbudgets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: opensearch-operator-metrics-reader -rules: -- nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: opensearch-operator-proxy-role -rules: -- apiGroups: - - 
authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: opensearch-operator-leader-election-rolebinding - namespace: opensearch-operator-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: opensearch-operator-leader-election-role -subjects: -- kind: ServiceAccount - name: opensearch-operator-controller-manager - namespace: opensearch-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: opensearch-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: opensearch-operator-manager-role -subjects: -- kind: ServiceAccount - name: opensearch-operator-controller-manager - namespace: opensearch-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: opensearch-operator-proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: opensearch-operator-proxy-role -subjects: -- kind: ServiceAccount - name: opensearch-operator-controller-manager - namespace: opensearch-operator-system ---- -apiVersion: v1 -data: - controller_manager_config.yaml: | - apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 - kind: ControllerManagerConfig - health: - healthProbeBindAddress: :8081 - metrics: - bindAddress: 127.0.0.1:8080 - webhook: - port: 9443 - leaderElection: - leaderElect: true - resourceName: a867c7dc.opensearch.opster.io -kind: ConfigMap -metadata: - name: opensearch-operator-manager-config - namespace: opensearch-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: opensearch-operator-controller-manager-metrics-service - namespace: opensearch-operator-system -spec: - ports: - - name: https - port: 8443 - 
targetPort: https - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: opensearch-operator-controller-manager - namespace: opensearch-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - - args: - - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 - - --leader-elect - command: - - /manager - image: controller:latest - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 100m - memory: 30Mi - requests: - cpu: 100m - memory: 20Mi - securityContext: - allowPrivilegeEscalation: false - securityContext: - runAsNonRoot: true - serviceAccountName: opensearch-operator-controller-manager - terminationGracePeriodSeconds: 10 diff --git a/src/api/go/pxapi/examples/standalone_pem_example/example.go b/src/api/go/pxapi/examples/standalone_pem_example/example.go index 3b3247e11dd..64e1e3b10da 100644 --- a/src/api/go/pxapi/examples/standalone_pem_example/example.go +++ b/src/api/go/pxapi/examples/standalone_pem_example/example.go @@ -30,66 +30,18 @@ import ( // Define PxL script with one table output. 
var ( - stream = ` -import px -df = px.DataFrame('http_events') -px.display(df.stream()) -` pxl = ` import px -import pxlog -table = 'vector.json' -f = '/home/ddelnano/code/pixie-worktree/vector.json' -pxlog.FileSource(f, table, '5m') -df = px.DataFrame(table) -px.display(df)` - bpftrace = ` -import pxtrace -import px -# Adapted from https://github.com/iovisor/bpftrace/blob/master/tools/tcpretrans.bt -program = """ -// tcpretrans.bt Trace or count TCP retransmits -// For Linux, uses bpftrace and eBPF. -// -// Copyright (c) 2018 Dale Hamel. -// Licensed under the Apache License, Version 2.0 (the "License") -#include -#include -kprobe:tcp_retransmit_skb -{ - $sk = (struct sock *)arg0; - $inet_family = $sk->__sk_common.skc_family; - $AF_INET = (uint16) 2; - $AF_INET6 = (uint16) 10; - if ($inet_family == $AF_INET || $inet_family == $AF_INET6) { - if ($inet_family == $AF_INET) { - $daddr = ntop($sk->__sk_common.skc_daddr); - $saddr = ntop($sk->__sk_common.skc_rcv_saddr); - } else { - $daddr = ntop($sk->__sk_common.skc_v6_daddr.in6_u.u6_addr8); - $saddr = ntop($sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr8); - } - $sport = $sk->__sk_common.skc_num; - $dport = $sk->__sk_common.skc_dport; - // Destination port is big endian, it must be flipped - $dport = ($dport >> 8) | (($dport << 8) & 0x00FF00); - printf(\"time_:%llu src_ip:%s src_port:%d dst_ip:%s dst_port:%d\", - nsecs, - $saddr, - $sport, - $daddr, - $dport); - } -} -""" -table_name = 'tcp_retransmits_table' -pxtrace.UpsertTracepoint('tcp_retranmits_probe', - table_name, - program, - pxtrace.kprobe(), - "2m") -df = px.DataFrame(table=table_name, select=['time_', 'src_ip', 'src_port', 'dst_ip', 'dst_port']) +# Look at the http_events. +df = px.DataFrame(table='http_events') + +# Grab the command line from the metadata. +df.cmdline = px.upid_to_cmdline(df.upid) + +# Limit to the first 10. 
+df = df.head(10) + px.display(df)` ) diff --git a/src/carnot/BUILD.bazel b/src/carnot/BUILD.bazel index 30132ab5f14..b22fd1a949a 100644 --- a/src/carnot/BUILD.bazel +++ b/src/carnot/BUILD.bazel @@ -69,6 +69,7 @@ pl_cc_test( ":cc_library", "//src/carnot/exec:test_utils", "//src/carnot/udf_exporter:cc_library", + "//src/common/testing/event:cc_library", ], ) @@ -79,6 +80,7 @@ pl_cc_test( ":cc_library", "//src/carnot/exec:test_utils", "//src/carnot/udf_exporter:cc_library", + "//src/common/testing/event:cc_library", ], ) diff --git a/src/carnot/carnot_test.cc b/src/carnot/carnot_test.cc index 3ea11080844..9d32031bfc4 100644 --- a/src/carnot/carnot_test.cc +++ b/src/carnot/carnot_test.cc @@ -211,7 +211,7 @@ px.display(df, 'range_output'))pxl"; std::vector col0_out1; std::vector col1_out1; std::vector col2_out1; - table_store::Cursor cursor(big_table_.get()); + table_store::Table::Cursor cursor(big_table_.get()); auto batch = cursor.GetNextRowBatch({0}).ConsumeValueOrDie(); for (int64_t i = 0; i < batch->ColumnAt(0)->length(); i++) { if (CarnotTestUtils::big_test_col1[i].val >= 2 && CarnotTestUtils::big_test_col1[i].val < 12) { diff --git a/src/carnot/exec/BUILD.bazel b/src/carnot/exec/BUILD.bazel index 9eef30d5f16..625a6964d59 100644 --- a/src/carnot/exec/BUILD.bazel +++ b/src/carnot/exec/BUILD.bazel @@ -228,7 +228,6 @@ pl_cc_test( deps = [ ":cc_library", ":test_utils", - "//src/common/testing/event:cc_library", "//src/carnot/planpb:plan_testutils", "@com_github_apache_arrow//:arrow", ], @@ -299,7 +298,6 @@ pl_cc_test( ":exec_node_test_helpers", ":test_utils", "//src/carnot/planpb:plan_testutils", - "//src/common/testing/event:cc_library", "@com_github_apache_arrow//:arrow", "@com_github_grpc_grpc//:grpc++_test", ], diff --git a/src/carnot/exec/exec_graph_test.cc b/src/carnot/exec/exec_graph_test.cc index d578dbac57c..d5c7abb8d89 100644 --- a/src/carnot/exec/exec_graph_test.cc +++ b/src/carnot/exec/exec_graph_test.cc @@ -38,7 +38,6 @@ #include "src/carnot/udf/base.h" 
#include "src/carnot/udf/registry.h" #include "src/carnot/udf/udf.h" -#include "src/common/testing/event/simulated_time_system.h" #include "src/common/testing/testing.h" #include "src/shared/types/arrow_adapter.h" #include "src/shared/types/types.h" @@ -78,12 +77,6 @@ class BaseExecGraphTest : public ::testing::Test { exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); } std::unique_ptr func_registry_; @@ -157,7 +150,7 @@ TEST_P(ExecGraphExecuteTest, execute) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); + auto table = Table::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -182,12 +175,6 @@ TEST_P(ExecGraphExecuteTest, execute) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); EXPECT_OK(exec_state_->AddScalarUDF( 0, "add", std::vector({types::DataType::INT64, types::DataType::FLOAT64}))); @@ -200,14 +187,11 @@ 
TEST_P(ExecGraphExecuteTest, execute) { /* collect_exec_node_stats */ false, calls_to_generate); EXPECT_OK(e.Execute()); - auto stats = e.GetStats(); - EXPECT_EQ(stats.bytes_processed, 85); - EXPECT_EQ(stats.rows_processed, 5); auto output_table = exec_state_->table_store()->GetTable("output"); std::vector out_in1 = {4.8, 16.4, 26.4}; std::vector out_in2 = {14.8, 12.4}; - table_store::Cursor cursor(output_table); + table_store::Table::Cursor cursor(output_table); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( @@ -245,7 +229,7 @@ TEST_F(ExecGraphTest, execute_time) { table_store::schema::Relation rel( {types::DataType::TIME64NS, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); + auto table = Table::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {types::Time64NSValue(1), types::Time64NSValue(2), @@ -272,12 +256,6 @@ TEST_F(ExecGraphTest, execute_time) { auto exec_state_ = std::make_unique( func_registry.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); EXPECT_OK(exec_state_->AddScalarUDF( 0, "add", std::vector({types::DataType::INT64, types::DataType::FLOAT64}))); @@ -294,7 +272,7 @@ TEST_F(ExecGraphTest, execute_time) { auto output_table = exec_state_->table_store()->GetTable("output"); std::vector out_in1 = {4.8, 16.4, 26.4}; std::vector 
out_in2 = {14.8, 12.4}; - table_store::Cursor cursor(output_table); + table_store::Table::Cursor cursor(output_table); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); EXPECT_TRUE(cursor.GetNextRowBatch({0}).ConsumeValueOrDie()->ColumnAt(0)->Equals( @@ -320,7 +298,7 @@ TEST_F(ExecGraphTest, two_limits_dont_interfere) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); + auto table = Table::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -345,12 +323,6 @@ TEST_F(ExecGraphTest, two_limits_dont_interfere) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -363,8 +335,8 @@ TEST_F(ExecGraphTest, two_limits_dont_interfere) { std::vector out_col1 = {1, 2}; std::vector out_col2 = {true, false}; std::vector out_col3 = {1.4, 6.2}; - table_store::Cursor cursor1(output_table1); - table_store::Cursor cursor2(output_table2); + table_store::Table::Cursor cursor1(output_table1); + table_store::Table::Cursor cursor2(output_table2); auto out_rb1 = cursor1.GetNextRowBatch(std::vector({0, 1, 2})).ConsumeValueOrDie(); auto out_rb2 = cursor2.GetNextRowBatch(std::vector({0, 1, 2})).ConsumeValueOrDie(); @@ -394,7 
+366,7 @@ TEST_F(ExecGraphTest, limit_w_multiple_srcs) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); + auto table = Table::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -419,12 +391,6 @@ TEST_F(ExecGraphTest, limit_w_multiple_srcs) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -436,7 +402,7 @@ TEST_F(ExecGraphTest, limit_w_multiple_srcs) { std::vector out_col1 = {1, 2}; std::vector out_col2 = {true, false}; std::vector out_col3 = {1.4, 6.2}; - table_store::Cursor cursor(output_table); + table_store::Table::Cursor cursor(output_table); auto out_rb = cursor.GetNextRowBatch(std::vector({0, 1, 2})).ConsumeValueOrDie(); EXPECT_TRUE(out_rb->ColumnAt(0)->Equals(types::ToArrow(out_col1, arrow::default_memory_pool()))); EXPECT_TRUE(out_rb->ColumnAt(1)->Equals(types::ToArrow(out_col2, arrow::default_memory_pool()))); @@ -461,7 +427,7 @@ TEST_F(ExecGraphTest, two_sequential_limits) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); + auto table = Table::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); 
std::vector col1_in1 = {1, 2, 3}; @@ -487,12 +453,6 @@ TEST_F(ExecGraphTest, two_sequential_limits) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -504,7 +464,7 @@ TEST_F(ExecGraphTest, two_sequential_limits) { std::vector out_col1 = {1, 2}; std::vector out_col2 = {true, false}; std::vector out_col3 = {1.4, 6.2}; - table_store::Cursor cursor(output_table); + table_store::Table::Cursor cursor(output_table); auto out_rb = cursor.GetNextRowBatch({0, 1, 2}).ConsumeValueOrDie(); EXPECT_TRUE(out_rb->ColumnAt(0)->Equals(types::ToArrow(out_col1, arrow::default_memory_pool()))); EXPECT_TRUE(out_rb->ColumnAt(1)->Equals(types::ToArrow(out_col2, arrow::default_memory_pool()))); @@ -530,7 +490,7 @@ TEST_F(ExecGraphTest, execute_with_two_limits) { table_store::schema::Relation rel( {types::DataType::INT64, types::DataType::BOOLEAN, types::DataType::FLOAT64}, {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); + auto table = Table::Create("test", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -556,12 +516,6 @@ TEST_F(ExecGraphTest, execute_with_two_limits) { auto exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - 
std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); ExecutionGraph e; auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), @@ -572,179 +526,14 @@ TEST_F(ExecGraphTest, execute_with_two_limits) { auto output_table_1 = exec_state_->table_store()->GetTable("output1"); auto output_table_2 = exec_state_->table_store()->GetTable("output2"); std::vector out_in1 = {1.4, 6.2}; - table_store::Cursor cursor1(output_table_1); + table_store::Table::Cursor cursor1(output_table_1); EXPECT_TRUE(cursor1.GetNextRowBatch({2}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); - table_store::Cursor cursor2(output_table_2); + table_store::Table::Cursor cursor2(output_table_2); EXPECT_TRUE(cursor2.GetNextRowBatch({2}).ConsumeValueOrDie()->ColumnAt(0)->Equals( types::ToArrow(out_in1, arrow::default_memory_pool()))); } -TEST_F(ExecGraphTest, execute_with_timed_sink_node_no_prior_results_table) { - planpb::PlanFragment pf_pb; - ASSERT_TRUE(TextFormat::MergeFromString(planpb::testutils::kPlanWithOTelExport, &pf_pb)); - std::shared_ptr plan_fragment_ = std::make_shared(1); - ASSERT_OK(plan_fragment_->Init(pf_pb)); - - auto plan_state = std::make_unique(func_registry_.get()); - - auto schema = std::make_shared(); - schema->AddRelation( - 1, table_store::schema::Relation( - std::vector( - {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}), - std::vector({"a", "b", "c"}))); - - table_store::schema::Relation rel( - {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}, - {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); - - auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); - std::vector 
col1_in1 = {"service a", "service b", "service c"}; - std::vector col2_in1 = {true, false, true}; - std::vector col3_in1 = {1.4, 6.2, 10.2}; - - EXPECT_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - EXPECT_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - EXPECT_OK(rb1.AddColumn(types::ToArrow(col3_in1, arrow::default_memory_pool()))); - EXPECT_OK(table->WriteRowBatch(rb1)); - - auto rb2 = RowBatch(RowDescriptor(rel.col_types()), 2); - std::vector col1_in2 = {"service a", "service b"}; - std::vector col2_in2 = {false, false}; - std::vector col3_in2 = {3.4, 1.2}; - EXPECT_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); - EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); - EXPECT_OK(rb2.AddColumn(types::ToArrow(col3_in2, arrow::default_memory_pool()))); - EXPECT_OK(table->WriteRowBatch(rb2)); - - auto table_store = std::make_shared(); - table_store->AddTable("numbers", table); - auto exec_state_ = std::make_unique( - func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, - MockTraceStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); - - ExecutionGraph e; - auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), - /* collect_exec_node_stats */ false); - - EXPECT_OK(e.Execute()); - - auto output_table_1 = exec_state_->table_store()->GetTable("sink_results"); - EXPECT_NE(output_table_1, nullptr); - std::vector out1_in1 = {54}; - std::vector out1_in2 = {54}; - std::vector out1_in3 = {36}; - std::vector out2_in1 = {planpb::OperatorType::MEMORY_SOURCE_OPERATOR}; - std::vector out2_in2 = 
{planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; - std::vector out2_in3 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; - table_store::Cursor cursor1(output_table_1); - auto rb_out1 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); - EXPECT_TRUE(rb_out1->ColumnAt(0)->Equals(types::ToArrow(out1_in1, arrow::default_memory_pool()))); - EXPECT_TRUE(rb_out1->ColumnAt(1)->Equals(types::ToArrow(out2_in1, arrow::default_memory_pool()))); - auto rb_out2 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); - EXPECT_TRUE(rb_out2->ColumnAt(0)->Equals(types::ToArrow(out1_in2, arrow::default_memory_pool()))); - EXPECT_TRUE(rb_out2->ColumnAt(1)->Equals(types::ToArrow(out2_in2, arrow::default_memory_pool()))); - auto rb_out3 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); - EXPECT_TRUE(rb_out3->ColumnAt(0)->Equals(types::ToArrow(out1_in3, arrow::default_memory_pool()))); - EXPECT_TRUE(rb_out3->ColumnAt(1)->Equals(types::ToArrow(out2_in3, arrow::default_memory_pool()))); -} - -TEST_F(ExecGraphTest, execute_with_timed_sink_node_prior_results_table) { - planpb::PlanFragment pf_pb; - ASSERT_TRUE(TextFormat::MergeFromString(planpb::testutils::kPlanWithOTelExport, &pf_pb)); - std::shared_ptr plan_fragment_ = std::make_shared(1); - ASSERT_OK(plan_fragment_->Init(pf_pb)); - - auto plan_state = std::make_unique(func_registry_.get()); - - auto schema = std::make_shared(); - schema->AddRelation( - 1, table_store::schema::Relation( - std::vector( - {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}), - std::vector({"a", "b", "c"}))); - - table_store::schema::Relation rel( - {types::DataType::STRING, types::DataType::BOOLEAN, types::DataType::FLOAT64}, - {"col1", "col2", "col3"}); - auto table = table_store::HotColdTable::Create("test", rel); - - auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); - std::vector col1_in1 = {"service a", "service b", "service c"}; - std::vector col2_in1 = {true, false, true}; - std::vector col3_in1 = {1.4, 6.2, 
10.2}; - - EXPECT_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - EXPECT_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - EXPECT_OK(rb1.AddColumn(types::ToArrow(col3_in1, arrow::default_memory_pool()))); - EXPECT_OK(table->WriteRowBatch(rb1)); - - auto rb2 = RowBatch(RowDescriptor(rel.col_types()), 2); - std::vector col1_in2 = {"service a", "service b"}; - std::vector col2_in2 = {false, false}; - std::vector col3_in2 = {3.4, 1.2}; - EXPECT_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); - EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); - EXPECT_OK(rb2.AddColumn(types::ToArrow(col3_in2, arrow::default_memory_pool()))); - EXPECT_OK(table->WriteRowBatch(rb2)); - - std::vector sink_results_col_names = {"time_", "upid", "bytes_transferred", "destination", - "stream_id"}; - table_store::schema::Relation sink_results_rel( - {types::DataType::TIME64NS, types::DataType::UINT128, types::DataType::INT64, types::DataType::INT64, types::DataType::STRING}, - sink_results_col_names); - auto sink_results_table = table_store::HotColdTable::Create("sink_results", sink_results_rel); - - auto table_store = std::make_shared(); - table_store->AddTable("numbers", table); - table_store->AddTable("sink_results", sink_results_table); - auto exec_state_ = std::make_unique( - func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, - MockTraceStubGenerator, sole::uuid4(), nullptr); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); - - ExecutionGraph e; - auto s = e.Init(schema.get(), plan_state.get(), exec_state_.get(), plan_fragment_.get(), - /* collect_exec_node_stats */ 
false); - - EXPECT_OK(e.Execute()); - - auto output_table_1 = exec_state_->table_store()->GetTable("sink_results"); - EXPECT_NE(output_table_1, nullptr); - std::vector out1_in1 = {54}; - std::vector out1_in2 = {54}; - std::vector out1_in3 = {36}; - std::vector out2_in1 = {planpb::OperatorType::MEMORY_SOURCE_OPERATOR}; - std::vector out2_in2 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; - std::vector out2_in3 = {planpb::OperatorType::OTEL_EXPORT_SINK_OPERATOR}; - table_store::Cursor cursor1(output_table_1); - auto rb_out1 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); - LOG(INFO) << rb_out1->DebugString(); - EXPECT_TRUE(rb_out1->ColumnAt(0)->Equals(types::ToArrow(out1_in1, arrow::default_memory_pool()))); - EXPECT_TRUE(rb_out1->ColumnAt(1)->Equals(types::ToArrow(out2_in1, arrow::default_memory_pool()))); - auto rb_out2 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); - LOG(INFO) << rb_out2->DebugString(); - EXPECT_TRUE(rb_out2->ColumnAt(0)->Equals(types::ToArrow(out1_in2, arrow::default_memory_pool()))); - EXPECT_TRUE(rb_out2->ColumnAt(1)->Equals(types::ToArrow(out2_in2, arrow::default_memory_pool()))); - auto rb_out3 = cursor1.GetNextRowBatch({2, 3}).ConsumeValueOrDie(); - LOG(INFO) << rb_out3->DebugString(); - EXPECT_TRUE(rb_out3->ColumnAt(0)->Equals(types::ToArrow(out1_in3, arrow::default_memory_pool()))); - EXPECT_TRUE(rb_out3->ColumnAt(1)->Equals(types::ToArrow(out2_in3, arrow::default_memory_pool()))); -} - class YieldingExecGraphTest : public BaseExecGraphTest { protected: void SetUp() { SetUpExecState(); } @@ -914,12 +703,6 @@ class GRPCExecGraphTest : public ::testing::Test { exec_state_ = std::make_unique( func_registry_.get(), table_store, MockResultSinkStubGenerator, MockMetricsStubGenerator, MockTraceStubGenerator, MockLogStubGenerator, sole::uuid4(), nullptr, grpc_router_.get()); - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - auto metadata_state = std::make_shared( - 
"myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); } void SetUpPlanFragment() { diff --git a/src/carnot/exec/exec_node.h b/src/carnot/exec/exec_node.h index 764c865229c..34c692c61ce 100644 --- a/src/carnot/exec/exec_node.h +++ b/src/carnot/exec/exec_node.h @@ -18,7 +18,6 @@ #pragma once -#include #include #include #include @@ -29,18 +28,6 @@ #include "src/common/perf/perf.h" #include "src/table_store/table_store.h" -namespace px::carnot::exec { -// Forward declaration so enum_range can be specialized. -enum class SinkResultsDestType : uint64_t; - -} // namespace px::carot::exec - -template <> -struct magic_enum::customize::enum_range { - static constexpr int min = 1000; - static constexpr int max = 11000; -}; - namespace px { namespace carnot { namespace exec { @@ -140,29 +127,10 @@ struct ExecNodeStats { absl::flat_hash_map extra_info; }; -enum class SinkResultsDestType : uint64_t { - amqp_events = 10001, // TODO(ddelnano): This is set to not collide with the planpb::OperatorType enum - cql_events, - dns_events, - http_events, - kafka_events, // Won't work since table is suffixed with ".beta" - mongodb_events, - mux_events, - mysql_events, - nats_events, // Won't work since table is suffixed with ".beta" - pgsql_events, - redis_events, -}; - /** * This is the base class for the execution nodes in Carnot. 
*/ class ExecNode { - const std::string kContextKey = "mutation_id"; - const std::string kSinkResultsTableName = "sink_results"; - const std::vector sink_results_col_names = {"time_", "upid", "bytes_transferred", - "destination", "stream_id"}; - public: ExecNode() = delete; virtual ~ExecNode() = default; @@ -175,27 +143,9 @@ class ExecNode { * @return */ Status Init(const plan::Operator& plan_node, - const table_store::schema::RowDescriptor& output_descriptor, - std::vector input_descriptors, - bool collect_exec_stats = false) { - auto op_type = plan_node.op_type(); - // TODO(ddelnano): Replace this with a template based compile time check - // to ensure that there can't be segfaults on the subsequent static_casts - if (op_type == planpb::MEMORY_SOURCE_OPERATOR || op_type == planpb::GRPC_SINK_OPERATOR || - op_type == planpb::MEMORY_SINK_OPERATOR || op_type == planpb::OTEL_EXPORT_SINK_OPERATOR) { - const auto* sink_op = static_cast(&plan_node); - context_ = sink_op->context(); - auto op_type = plan_node.op_type(); - destination_ = static_cast(op_type); - if (op_type == planpb::MEMORY_SOURCE_OPERATOR) { - const auto* memory_source_op = static_cast(&plan_node); - auto table_name = memory_source_op->TableName(); - auto protocol_events = magic_enum::enum_cast(table_name); - if (protocol_events.has_value()) { - destination_ = static_cast(protocol_events.value()); - } - } - } + const table_store::schema::RowDescriptor& output_descriptor, + std::vector input_descriptors, + bool collect_exec_stats = false) { is_initialized_ = true; output_descriptor_ = std::make_unique(output_descriptor); input_descriptors_ = input_descriptors; @@ -210,9 +160,6 @@ class ExecNode { */ Status Prepare(ExecState* exec_state) { DCHECK(is_initialized_); - if (context_.find(kContextKey) != context_.end()) { - SetUpStreamResultsTable(exec_state); - } return PrepareImpl(exec_state); } @@ -264,7 +211,7 @@ class ExecNode { * @return The Status of consumption. 
*/ Status ConsumeNext(ExecState* exec_state, const table_store::schema::RowBatch& rb, - size_t parent_index) { + size_t parent_index) { DCHECK(is_initialized_); DCHECK(type() == ExecNodeType::kSinkNode || type() == ExecNodeType::kProcessingNode); if (rb.eos() && !rb.eow()) { @@ -275,8 +222,6 @@ class ExecNode { stats_->ResumeTotalTimer(); PX_RETURN_IF_ERROR(ConsumeNextImpl(exec_state, rb, parent_index)); stats_->StopTotalTimer(); - PX_RETURN_IF_ERROR( - RecordSinkResults(rb, exec_state->time_now(), exec_state->GetAgentUPID().value())); return Status::OK(); } @@ -337,8 +282,7 @@ class ExecNode { * @param rb The row batch to send. * @return Status of children execution. */ - Status SendRowBatchToChildren(ExecState* exec_state, - const table_store::schema::RowBatch& rb) { + Status SendRowBatchToChildren(ExecState* exec_state, const table_store::schema::RowBatch& rb) { stats_->ResumeChildTimer(); for (size_t i = 0; i < children_.size(); ++i) { PX_RETURN_IF_ERROR(children_[i]->ConsumeNext(exec_state, rb, parent_ids_for_children_[i])); @@ -349,16 +293,10 @@ class ExecNode { DCHECK(!sent_eos_); sent_eos_ = true; } - PX_RETURN_IF_ERROR( - RecordSinkResults(rb, exec_state->time_now(), exec_state->GetAgentUPID().value())); return Status::OK(); } - explicit ExecNode(ExecNodeType type) - : type_(type), - rel_({types::DataType::TIME64NS, types::DataType::UINT128, types::DataType::INT64, - types::DataType::INT64, types::DataType::STRING}, - sink_results_col_names) {} + explicit ExecNode(ExecNodeType type) : type_(type) {} // Defines the protected implementations of the non-virtual interface functions // defined above. 
@@ -383,43 +321,6 @@ class ExecNode { bool sent_eos_ = false; private: - void SetUpStreamResultsTable(ExecState* exec_state) { - auto sink_results = exec_state->table_store()->GetTable(kSinkResultsTableName); - if (sink_results != nullptr) { - table_ = sink_results; - } else { - auto table = table_store::HotColdTable::Create(kSinkResultsTableName, rel_); - exec_state->table_store()->AddTable(kSinkResultsTableName, table); - table_ = table.get(); - } - } - - Status RecordSinkResults(const table_store::schema::RowBatch& rb, - const types::Time64NSValue time_now, const types::UInt128Value upid) { - if (table_ != nullptr && context_.find(kContextKey) != context_.end()) { - auto mutation_id = context_[kContextKey]; - std::vector col1_in1 = {time_now}; - std::vector col2_in1 = {upid}; - std::vector col3_in1 = {rb.NumBytes()}; - std::vector col4_in1 = {destination_}; - std::vector col5_in1 = {mutation_id}; - auto rb_sink_stats = - table_store::schema::RowBatch(table_store::schema::RowDescriptor(rel_.col_types()), 1); - PX_RETURN_IF_ERROR( - rb_sink_stats.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - PX_RETURN_IF_ERROR( - rb_sink_stats.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - PX_RETURN_IF_ERROR( - rb_sink_stats.AddColumn(types::ToArrow(col3_in1, arrow::default_memory_pool()))); - PX_RETURN_IF_ERROR( - rb_sink_stats.AddColumn(types::ToArrow(col4_in1, arrow::default_memory_pool()))); - PX_RETURN_IF_ERROR( - rb_sink_stats.AddColumn(types::ToArrow(col5_in1, arrow::default_memory_pool()))); - PX_RETURN_IF_ERROR(table_->WriteRowBatch(rb_sink_stats)); - } - return Status::OK(); - } - // The stats of this exec node. std::unique_ptr stats_; // Unowned reference to the children. Must remain valid for the duration of query. @@ -433,16 +334,6 @@ class ExecNode { ExecNodeType type_; // Whether this node has been initialized. bool is_initialized_ = false; - - // The context key, value pairs passed to the operator node. 
- // This is currently used to store the mutation_id. - std::map context_; - - // The operator type of the current node - uint64_t destination_; - - table_store::Table* table_; - table_store::schema::Relation rel_; }; /** diff --git a/src/carnot/exec/exec_state.h b/src/carnot/exec/exec_state.h index 2ecb5713918..444d9298d06 100644 --- a/src/carnot/exec/exec_state.h +++ b/src/carnot/exec/exec_state.h @@ -73,9 +73,8 @@ class ExecState { udf::Registry* func_registry, std::shared_ptr table_store, const ResultSinkStubGenerator& stub_generator, const MetricsStubGenerator& metrics_stub_generator, - const TraceStubGenerator& trace_stub_generator, - const LogsStubGenerator& logs_stub_generator, const sole::uuid& query_id, - udf::ModelPool* model_pool, GRPCRouter* grpc_router = nullptr, + const TraceStubGenerator& trace_stub_generator, const LogsStubGenerator& logs_stub_generator, + const sole::uuid& query_id, udf::ModelPool* model_pool, GRPCRouter* grpc_router = nullptr, std::function add_auth_func = [](grpc::ClientContext*) {}, ExecMetrics* exec_metrics = nullptr) : func_registry_(func_registry), @@ -88,8 +87,7 @@ class ExecState { model_pool_(model_pool), grpc_router_(grpc_router), add_auth_to_grpc_client_context_func_(add_auth_func), - exec_metrics_(exec_metrics), - time_now_(px::CurrentTimeNS()) {} + exec_metrics_(exec_metrics) {} ~ExecState() { if (grpc_router_ != nullptr) { @@ -213,8 +211,6 @@ class ExecState { metadata_state_ = metadata_state; } - md::UPID GetAgentUPID() const { return metadata_state_->agent_upid(); } - GRPCRouter* grpc_router() { return grpc_router_; } void AddAuthToGRPCClientContext(grpc::ClientContext* ctx) { @@ -224,8 +220,6 @@ class ExecState { ExecMetrics* exec_metrics() { return exec_metrics_; } - types::Time64NSValue time_now() const { return time_now_; } - private: udf::Registry* func_registry_; std::shared_ptr table_store_; diff --git a/src/carnot/exec/grpc_sink_node_benchmark.cc b/src/carnot/exec/grpc_sink_node_benchmark.cc index 
77447969f47..96707f0d896 100644 --- a/src/carnot/exec/grpc_sink_node_benchmark.cc +++ b/src/carnot/exec/grpc_sink_node_benchmark.cc @@ -76,8 +76,7 @@ void BM_GRPCSinkNodeSplitting(benchmark::State& state) { px::carnot::exec::GRPCSinkNode node; auto op_proto = px::carnot::planpb::testutils::CreateTestGRPCSink2PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); auto num_rows = 1024; diff --git a/src/carnot/exec/grpc_sink_node_test.cc b/src/carnot/exec/grpc_sink_node_test.cc index f4b398ecae4..62d6a2e2c12 100644 --- a/src/carnot/exec/grpc_sink_node_test.cc +++ b/src/carnot/exec/grpc_sink_node_test.cc @@ -18,7 +18,6 @@ #include "src/carnot/exec/grpc_sink_node.h" -#include #include #include @@ -163,8 +162,7 @@ query_result { TEST_F(GRPCSinkNodeTest, internal_result) { auto op_proto = planpb::testutils::CreateTestGRPCSink1PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -296,8 +294,7 @@ query_result { TEST_F(GRPCSinkNodeTest, external_result) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -355,8 +352,7 @@ TEST_F(GRPCSinkNodeTest, external_result) { TEST_F(GRPCSinkNodeTest, check_connection) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); 
RowDescriptor output_rd({types::DataType::INT64}); @@ -396,8 +392,7 @@ TEST_F(GRPCSinkNodeTest, check_connection) { TEST_F(GRPCSinkNodeTest, update_connection_time) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -449,8 +444,7 @@ class GRPCSinkNodeSplitTest : public GRPCSinkNodeTest, TEST_P(GRPCSinkNodeSplitTest, break_up_batches) { auto op_proto = planpb::testutils::CreateTestGRPCSink1PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); auto test_case = GetParam(); @@ -658,8 +652,7 @@ INSTANTIATE_TEST_SUITE_P(SplitBatchesTest, GRPCSinkNodeSplitTest, TEST_F(GRPCSinkNodeTest, retry_failed_writes) { auto op_proto = planpb::testutils::CreateTestGRPCSink1PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); @@ -731,8 +724,7 @@ TEST_F(GRPCSinkNodeTest, retry_failed_writes) { TEST_F(GRPCSinkNodeTest, check_connection_after_eos) { auto op_proto = planpb::testutils::CreateTestGRPCSink2PB(); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(op_proto.grpc_sink_op()); RowDescriptor input_rd({types::DataType::INT64}); RowDescriptor output_rd({types::DataType::INT64}); diff --git a/src/carnot/exec/memory_sink_node.cc b/src/carnot/exec/memory_sink_node.cc index 910b70c3f30..6f0fb54c5e9 100644 --- a/src/carnot/exec/memory_sink_node.cc +++ b/src/carnot/exec/memory_sink_node.cc @@ -62,8 +62,7 @@ Status 
MemorySinkNode::PrepareImpl(ExecState* exec_state_) { col_names.push_back(plan_node_->ColumnName(i)); } - table_ = table_store::HotColdTable::Create(TableName(), - Relation(input_descriptor_->types(), col_names)); + table_ = Table::Create(TableName(), Relation(input_descriptor_->types(), col_names)); exec_state_->table_store()->AddTable(plan_node_->TableName(), table_); return Status::OK(); diff --git a/src/carnot/exec/memory_sink_node_test.cc b/src/carnot/exec/memory_sink_node_test.cc index a318375b249..e2587dbf132 100644 --- a/src/carnot/exec/memory_sink_node_test.cc +++ b/src/carnot/exec/memory_sink_node_test.cc @@ -85,7 +85,7 @@ TEST_F(MemorySinkNodeTest, basic) { false, 0); auto table = exec_state_->table_store()->GetTable("cpu_15s"); - table_store::Cursor cursor(table); + table_store::Table::Cursor cursor(table); auto batch_or_s = cursor.GetNextRowBatch({0, 1}); EXPECT_OK(batch_or_s); auto batch = batch_or_s.ConsumeValueOrDie(); @@ -104,7 +104,7 @@ TEST_F(MemorySinkNodeTest, basic) { .Close(); // Update stop spec of the cursor to include the new row batch. 
- cursor.UpdateStopSpec(table_store::Cursor::StopSpec{}); + cursor.UpdateStopSpec(table_store::Table::Cursor::StopSpec{}); batch_or_s = cursor.GetNextRowBatch({0, 1}); EXPECT_OK(batch_or_s); batch = batch_or_s.ConsumeValueOrDie(); @@ -147,7 +147,7 @@ TEST_F(MemorySinkNodeTest, zero_row_row_batch_not_eos) { .Close(); auto table = exec_state_->table_store()->GetTable("cpu_15s"); - table_store::Cursor cursor(table); + table_store::Table::Cursor cursor(table); auto batch_or_s = cursor.GetNextRowBatch({0, 1}); EXPECT_OK(batch_or_s); auto batch = batch_or_s.ConsumeValueOrDie(); diff --git a/src/carnot/exec/memory_source_node.cc b/src/carnot/exec/memory_source_node.cc index 2c9f02df14e..97ad0513b50 100644 --- a/src/carnot/exec/memory_source_node.cc +++ b/src/carnot/exec/memory_source_node.cc @@ -32,8 +32,8 @@ namespace px { namespace carnot { namespace exec { -using StartSpec = table_store::Cursor::StartSpec; -using StopSpec = table_store::Cursor::StopSpec; +using StartSpec = Table::Cursor::StartSpec; +using StopSpec = Table::Cursor::StopSpec; std::string MemorySourceNode::DebugStringImpl() { return absl::Substitute("Exec::MemorySourceNode: ", plan_node_->TableName(), @@ -85,7 +85,7 @@ Status MemorySourceNode::OpenImpl(ExecState* exec_state) { stop_spec.type = StopSpec::StopType::CurrentEndOfTable; } } - cursor_ = std::make_unique(table_, start_spec, stop_spec); + cursor_ = std::make_unique(table_, start_spec, stop_spec); return Status::OK(); } diff --git a/src/carnot/exec/memory_source_node.h b/src/carnot/exec/memory_source_node.h index edbea4375d5..ccb059827f3 100644 --- a/src/carnot/exec/memory_source_node.h +++ b/src/carnot/exec/memory_source_node.h @@ -60,7 +60,7 @@ class MemorySourceNode : public SourceNode { // Whether this memory source will stream future results. 
bool streaming_ = false; - std::unique_ptr cursor_; + std::unique_ptr cursor_; std::unique_ptr plan_node_; table_store::Table* table_ = nullptr; diff --git a/src/carnot/exec/memory_source_node_test.cc b/src/carnot/exec/memory_source_node_test.cc index 418de849ee5..df86c58c23c 100644 --- a/src/carnot/exec/memory_source_node_test.cc +++ b/src/carnot/exec/memory_source_node_test.cc @@ -59,8 +59,7 @@ class MemorySourceNodeTest : public ::testing::Test { {"col1", "time_"}); int64_t compaction_size = 2 * sizeof(bool) + 2 * sizeof(int64_t); - cpu_table_ = - std::make_shared("cpu", rel, 128 * 1024, compaction_size); + cpu_table_ = std::make_shared
("cpu", rel, 128 * 1024, compaction_size); exec_state_->table_store()->AddTable("cpu", cpu_table_); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); @@ -77,7 +76,7 @@ class MemorySourceNodeTest : public ::testing::Test { EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); EXPECT_OK(cpu_table_->WriteRowBatch(rb2)); - exec_state_->table_store()->AddTable("empty", table_store::HotColdTable::Create("empty", rel)); + exec_state_->table_store()->AddTable("empty", Table::Create("empty", rel)); } std::shared_ptr
cpu_table_; @@ -238,7 +237,7 @@ class MemorySourceNodeTabletTest : public ::testing::Test { rel = table_store::schema::Relation({types::DataType::BOOLEAN, types::DataType::TIME64NS}, {"col1", "time_"}); - std::shared_ptr
tablet = table_store::HotColdTable::Create(table_name_, rel); + std::shared_ptr
tablet = Table::Create(table_name_, rel); AddValuesToTable(tablet.get()); exec_state_->table_store()->AddTable(tablet, table_name_, table_id_, tablet_id_); @@ -297,7 +296,7 @@ TEST_F(MemorySourceNodeTabletTest, basic_tablet_test) { TEST_F(MemorySourceNodeTabletTest, multiple_tablet_test) { types::TabletID new_tablet_id = "456"; EXPECT_NE(tablet_id_, new_tablet_id); - std::shared_ptr
new_tablet = table_store::HotColdTable::Create(tablet_id_, rel); + std::shared_ptr
new_tablet = Table::Create(tablet_id_, rel); auto wrapper_batch_1 = std::make_unique(); auto col_wrapper_1 = std::make_shared(0); @@ -459,8 +458,7 @@ class ParamMemorySourceNodeTest : public ::testing::Test, std::vector{types::DataType::TIME64NS}, std::vector{"time_"}); int64_t compaction_size = 2 * sizeof(int64_t); - cpu_table_ = - std::make_shared("cpu", *rel_, 128 * 1024, compaction_size); + cpu_table_ = std::make_shared
("cpu", *rel_, 128 * 1024, compaction_size); exec_state_->table_store()->AddTable("cpu", cpu_table_); planpb::Operator op; diff --git a/src/carnot/exec/otel_export_sink_node.cc b/src/carnot/exec/otel_export_sink_node.cc index 3ba8ec2a297..77da9f12d0b 100644 --- a/src/carnot/exec/otel_export_sink_node.cc +++ b/src/carnot/exec/otel_export_sink_node.cc @@ -465,8 +465,7 @@ Status OTelExportSinkNode::ConsumeLogs(ExecState* exec_state, const RowBatch& rb AddAttributes(log->mutable_attributes(), log_pb.attributes(), rb, row_idx); auto time_col = rb.ColumnAt(log_pb.time_column_index()).get(); - log->set_time_unix_nano( - types::GetValueFromArrowArray(time_col, row_idx)); + log->set_time_unix_nano(types::GetValueFromArrowArray(time_col, row_idx)); if (log_pb.observed_time_column_index() >= 0) { auto observed_time_col = rb.ColumnAt(log_pb.observed_time_column_index()).get(); log->set_observed_time_unix_nano( diff --git a/src/carnot/exec/otel_export_sink_node_test.cc b/src/carnot/exec/otel_export_sink_node_test.cc index 37ebfe3ea5b..9aeee55103e 100644 --- a/src/carnot/exec/otel_export_sink_node_test.cc +++ b/src/carnot/exec/otel_export_sink_node_test.cc @@ -42,7 +42,6 @@ #include "src/carnot/planpb/plan.pb.h" #include "src/carnot/planpb/test_proto.h" #include "src/carnot/udf/registry.h" -#include "src/common/testing/event/simulated_time_system.h" #include "src/common/testing/testing.h" #include "src/common/uuid/uuid_utils.h" #include "src/shared/types/types.h" @@ -102,14 +101,6 @@ class OTelExportSinkNodeTest : public ::testing::Test { return std::move(logs_mock_unique_); }, sole::uuid4(), nullptr, nullptr, [](grpc::ClientContext*) {}); - - auto time_system = std::make_unique( - std::chrono::steady_clock::now(), std::chrono::system_clock::now()); - - auto metadata_state = std::make_shared( - "myhost", 1, 963, 0, sole::uuid4(), "mypod", sole::uuid4(), "myvizier", "myviziernamespace", - time_system.get()); - exec_state_->set_metadata_state(metadata_state); } protected: @@ 
-145,8 +136,7 @@ metrics { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_pb_txt, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); RowDescriptor input_rd({types::TIME64NS, types::FLOAT64}); RowDescriptor output_rd({}); @@ -195,8 +185,7 @@ metrics { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_pb_txt, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); RowDescriptor input_rd({types::TIME64NS, types::FLOAT64, types::STRING}); RowDescriptor output_rd({}); @@ -261,8 +250,7 @@ TEST_P(OTelMetricsTest, process_data) { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. @@ -1045,8 +1033,7 @@ TEST_P(OTelSpanTest, process_data) { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. 
@@ -1543,8 +1530,7 @@ TEST_P(SpanIDTests, generate_ids) { planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. @@ -1700,8 +1686,7 @@ spans { parent_span_id_column_index: -1 })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 20 } } @@ -1739,8 +1724,7 @@ metrics { gauge { int_column_index: 1 } })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 11 } } @@ -1790,8 +1774,7 @@ spans { parent_span_id_column_index: -1 })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 20 } } @@ -1842,8 +1825,7 @@ metrics { gauge { int_column_index: 1 } })pb"; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); std::string row_batch = R"pb( cols { time64ns_data { data: 10 data: 11 } } @@ -1889,8 +1871,7 @@ TEST_P(OTelLogTest, process_data) { 
planpb::OTelExportSinkOperator otel_sink_op; EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(tc.operator_proto, &otel_sink_op)); - std::map context; - auto plan_node = std::make_unique(1, context); + auto plan_node = std::make_unique(1); auto s = plan_node->Init(otel_sink_op); // Load a RowBatch to get the Input RowDescriptor. diff --git a/src/carnot/exec/test_utils.h b/src/carnot/exec/test_utils.h index e0958eaf8df..e2f6fae1289 100644 --- a/src/carnot/exec/test_utils.h +++ b/src/carnot/exec/test_utils.h @@ -122,7 +122,7 @@ class CarnotTestUtils { static std::shared_ptr TestTable() { table_store::schema::Relation rel({types::DataType::FLOAT64, types::DataType::INT64}, {"col1", "col2"}); - auto table = table_store::HotColdTable::Create("test_table", rel); + auto table = table_store::Table::Create("test_table", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {0.5, 1.2, 5.3}; @@ -143,7 +143,7 @@ class CarnotTestUtils { static std::shared_ptr TestDuration64Table() { table_store::schema::Relation rel({types::DataType::INT64}, {"col1"}); - auto table = table_store::HotColdTable::Create("test_table", rel); + auto table = table_store::Table::Create("test_table", rel); auto rb1 = RowBatch(RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {1, 2, 3}; @@ -166,7 +166,7 @@ class CarnotTestUtils { types::DataType::INT64, types::DataType::STRING}, {"time_", "col2", "col3", "num_groups", "string_groups"}); - auto table = table_store::HotColdTable::Create("test_table", rel); + auto table = table_store::Table::Create("test_table", rel); for (const auto& pair : split_idx) { auto rb = RowBatch(RowDescriptor(rel.col_types()), pair.second - pair.first); @@ -227,7 +227,7 @@ class CarnotTestUtils { "read_bytes", "write_bytes", }); - auto table = table_store::HotColdTable::Create("process_table", rel); + auto table = table_store::Table::Create("process_table", rel); return table; } @@ -248,7 +248,7 @@ class CarnotTestUtils { 
"req_path", "req_body", "req_body_size", "resp_headers", "resp_status", "resp_message", "resp_body", "resp_body_size", "latency", }); - auto table = table_store::HotColdTable::Create("http_events_table", rel); + auto table = table_store::Table::Create("http_events_table", rel); return table; } }; diff --git a/src/carnot/funcs/builtins/builtins.cc b/src/carnot/funcs/builtins/builtins.cc index 5f4b941c9b9..f871244bdaf 100644 --- a/src/carnot/funcs/builtins/builtins.cc +++ b/src/carnot/funcs/builtins/builtins.cc @@ -25,7 +25,6 @@ #include "src/carnot/funcs/builtins/ml_ops.h" #include "src/carnot/funcs/builtins/pii_ops.h" #include "src/carnot/funcs/builtins/pprof_ops.h" -#include "src/carnot/funcs/builtins/pipeline_ops.h" #include "src/carnot/funcs/builtins/regex_ops.h" #include "src/carnot/funcs/builtins/request_path_ops.h" #include "src/carnot/funcs/builtins/sql_ops.h" @@ -53,7 +52,6 @@ void RegisterBuiltinsOrDie(udf::Registry* registry) { RegisterPIIOpsOrDie(registry); RegisterURIOpsOrDie(registry); RegisterUtilOpsOrDie(registry); - RegisterPipelineOpsOrDie(registry); RegisterPProfOpsOrDie(registry); } diff --git a/src/carnot/funcs/builtins/pipeline_ops.cc b/src/carnot/funcs/builtins/pipeline_ops.cc deleted file mode 100644 index 8528ad6dd29..00000000000 --- a/src/carnot/funcs/builtins/pipeline_ops.cc +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ -#include -#include -#include - -#include -#include "src/carnot/funcs/builtins/pipeline_ops.h" - -namespace px { -namespace carnot { -namespace builtins { - -void RegisterPipelineOpsOrDie(udf::Registry* registry) { - CHECK(registry != nullptr); - /***************************************** - * Scalar UDFs. - *****************************************/ - registry->RegisterOrDie("pipeline_dest_to_name"); -} - -} // namespace builtins -} // namespace carnot -} // namespace px diff --git a/src/carnot/funcs/builtins/pipeline_ops.h b/src/carnot/funcs/builtins/pipeline_ops.h deleted file mode 100644 index eb479d4a083..00000000000 --- a/src/carnot/funcs/builtins/pipeline_ops.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ -#pragma once - -#include "src/carnot/udf/registry.h" -#include "src/common/base/utils.h" -#include "src/shared/types/types.h" - -namespace px::carnot::builtins { -// Forward declaration so enum_range can be specialized. 
-enum class SinkResultsDestType : uint64_t; - -} // namespace px::carot::builtins - -template <> -struct magic_enum::customize::enum_range { - static constexpr int min = 1000; - static constexpr int max = 11000; -}; - - -namespace px { -namespace carnot { -namespace builtins { - -enum class SinkResultsDestType : uint64_t { - grpc_sink = 9100, - otel_export = 9200, - amqp_events = 10001, // TODO(ddelnano): This is set to not collide with the planpb::OperatorType enum - cql_events, - dns_events, - http_events, - kafka_events, // Won't work since table is suffixed with ".beta" - mongodb_events, - mux_events, - mysql_events, - nats_events, // Won't work since table is suffixed with ".beta" - pgsql_events, - redis_events, -}; - -class PipelineDestToName : public udf::ScalarUDF { - public: - StringValue Exec(FunctionContext*, Int64Value input) { - auto protocol_events = magic_enum::enum_cast(input.val); - if (!protocol_events.has_value()) { - return "unknown"; - } - return std::string(magic_enum::enum_name(protocol_events.value())); - } - - static udf::ScalarUDFDocBuilder Doc() { - return udf::ScalarUDFDocBuilder( - "Convert the destination ID from the sink_results table to a human-readable name.") - .Details("TBD") - .Example(R"doc( -df = px.DataFrame("sink_results) -df.dest = px.pipeline_dest_to_name(df.destination))doc") - .Arg("dest", "The destination enum to covert.") - .Returns("The human-readable name of the destination."); - } -}; - -void RegisterPipelineOpsOrDie(udf::Registry* registry); - -} // namespace builtins -} // namespace carnot -} // namespace px diff --git a/src/carnot/plan/operators.cc b/src/carnot/plan/operators.cc index 488671e7a97..d9dfebecfd6 100644 --- a/src/carnot/plan/operators.cc +++ b/src/carnot/plan/operators.cc @@ -45,20 +45,6 @@ namespace plan { using px::Status; -// enable_if std::is_base_of_v -template >> -std::unique_ptr CreateOperator(int64_t id, const TProto& pb, - std::map context) { - auto op = std::make_unique(id, context); - auto 
s = op->Init(pb); - // On init failure, return null; - if (!s.ok()) { - LOG(ERROR) << "Failed to initialize operator with err: " << s.msg(); - return nullptr; - } - return op; -} - template std::unique_ptr CreateOperator(int64_t id, const TProto& pb) { auto op = std::make_unique(id); @@ -72,21 +58,19 @@ std::unique_ptr CreateOperator(int64_t id, const TProto& pb) { } std::unique_ptr Operator::FromProto(const planpb::Operator& pb, int64_t id) { - auto pb_context = pb.context(); - std::map context(pb_context.begin(), pb_context.end()); switch (pb.op_type()) { case planpb::MEMORY_SOURCE_OPERATOR: - return CreateOperator(id, pb.mem_source_op(), context); + return CreateOperator(id, pb.mem_source_op()); case planpb::MAP_OPERATOR: return CreateOperator(id, pb.map_op()); case planpb::AGGREGATE_OPERATOR: return CreateOperator(id, pb.agg_op()); case planpb::MEMORY_SINK_OPERATOR: - return CreateOperator(id, pb.mem_sink_op(), context); + return CreateOperator(id, pb.mem_sink_op()); case planpb::GRPC_SOURCE_OPERATOR: return CreateOperator(id, pb.grpc_source_op()); case planpb::GRPC_SINK_OPERATOR: - return CreateOperator(id, pb.grpc_sink_op(), context); + return CreateOperator(id, pb.grpc_sink_op()); case planpb::FILTER_OPERATOR: return CreateOperator(id, pb.filter_op()); case planpb::LIMIT_OPERATOR: @@ -104,7 +88,7 @@ std::unique_ptr Operator::FromProto(const planpb::Operator& pb, int64_ case planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR: return CreateOperator(id, pb.clickhouse_sink_op()); case planpb::OTEL_EXPORT_SINK_OPERATOR: - return CreateOperator(id, pb.otel_sink_op(), context); + return CreateOperator(id, pb.otel_sink_op()); default: LOG(FATAL) << absl::Substitute("Unknown operator type: $0", magic_enum::enum_name(pb.op_type())); diff --git a/src/carnot/plan/operators.h b/src/carnot/plan/operators.h index 19e0b428d97..d77b5d6b18c 100644 --- a/src/carnot/plan/operators.h +++ b/src/carnot/plan/operators.h @@ -438,6 +438,7 @@ class OTelExportSinkOperator : public Operator { // 
TODO(philkuz) temporary measure. const planpb::OTelExportSinkOperator& pb() const { return pb_; } + const ::google::protobuf::RepeatedPtrField& logs() const { return pb_.logs(); } const ::google::protobuf::RepeatedPtrField& metrics() const { return pb_.metrics(); } diff --git a/src/carnot/planner/cgo_export_test.cc b/src/carnot/planner/cgo_export_test.cc index 9abace0d86a..eed16a9a972 100644 --- a/src/carnot/planner/cgo_export_test.cc +++ b/src/carnot/planner/cgo_export_test.cc @@ -278,43 +278,6 @@ TEST_F(PlannerExportTest, compile_delete_tracepoint) { EXPECT_THAT(mutations_response_pb, EqualsProto(kExpectedDeleteTracepointsMutationPb)); } -constexpr char kSingleFileSource[] = R"pxl( -import pxlog - -glob_pattern = 'test.json' -pxlog.FileSource(glob_pattern, 'test_table', '5m') -)pxl"; - -constexpr char kSingleFileSourceProgramPb[] = R"pxl( -glob_pattern: "test.json" -table_name: "test_table" -ttl { - seconds: 300 -} -)pxl"; - -TEST_F(PlannerExportTest, compile_file_source_def) { - planner_ = MakePlanner(); - int result_len; - std::string mutation_request; - plannerpb::CompileMutationsRequest req; - req.set_query_str(kSingleFileSource); - *(req.mutable_logical_planner_state()) = testutils::CreateTwoPEMsOneKelvinPlannerState(); - ASSERT_TRUE(req.SerializeToString(&mutation_request)); - auto interface_result = PlannerCompileMutations(planner_, mutation_request.c_str(), - mutation_request.length(), &result_len); - - ASSERT_GT(result_len, 0); - plannerpb::CompileMutationsResponse mutations_response_pb; - ASSERT_TRUE(mutations_response_pb.ParseFromString( - std::string(interface_result, interface_result + result_len))); - delete[] interface_result; - ASSERT_OK(mutations_response_pb.status()); - ASSERT_EQ(mutations_response_pb.mutations().size(), 1); - EXPECT_THAT(mutations_response_pb.mutations()[0].file_source(), - EqualsProto(kSingleFileSourceProgramPb)); -} - constexpr char kExportPxL[] = R"pxl(import px otel_df = 'placeholder' df = px.DataFrame('http_events', 
start_time='-5m') diff --git a/src/carnot/planner/compiler/BUILD.bazel b/src/carnot/planner/compiler/BUILD.bazel index 359d3518227..1298c0775a9 100644 --- a/src/carnot/planner/compiler/BUILD.bazel +++ b/src/carnot/planner/compiler/BUILD.bazel @@ -40,7 +40,6 @@ pl_cc_library( "//src/carnot/planner/compiler/optimizer:cc_library", "//src/carnot/planner/compiler_error_context:cc_library", "//src/carnot/planner/compiler_state:cc_library", - "//src/carnot/planner/file_source:cc_library", "//src/carnot/planner/ir:cc_library", "//src/carnot/planner/metadata:cc_library", "//src/carnot/planner/objects:cc_library", diff --git a/src/carnot/planner/compiler/ast_visitor.cc b/src/carnot/planner/compiler/ast_visitor.cc index a4bfe1eb071..0047815780c 100644 --- a/src/carnot/planner/compiler/ast_visitor.cc +++ b/src/carnot/planner/compiler/ast_visitor.cc @@ -104,8 +104,6 @@ Status ASTVisitorImpl::SetupModules( PixieModule::Create(ir_graph_, compiler_state_, this, func_based_exec_, reserved_names_)); PX_ASSIGN_OR_RETURN((*module_handler_)[TraceModule::kTraceModuleObjName], TraceModule::Create(mutations_, this)); - PX_ASSIGN_OR_RETURN((*module_handler_)[LogModule::kLogModuleObjName], - LogModule::Create(mutations_, this)); PX_ASSIGN_OR_RETURN((*module_handler_)[ConfigModule::kConfigModuleObjName], ConfigModule::Create(mutations_, this)); for (const auto& [module_name, module_text] : module_name_to_pxl_map) { diff --git a/src/carnot/planner/compiler/ast_visitor.h b/src/carnot/planner/compiler/ast_visitor.h index 7984698ecb5..7d10e93a6ae 100644 --- a/src/carnot/planner/compiler/ast_visitor.h +++ b/src/carnot/planner/compiler/ast_visitor.h @@ -33,7 +33,6 @@ #include "src/carnot/funcs/builtins/math_ops.h" #include "src/carnot/planner/ast/ast_visitor.h" #include "src/carnot/planner/compiler_state/compiler_state.h" -#include "src/carnot/planner/file_source/log_module.h" #include "src/carnot/planner/ir/ast_utils.h" #include "src/carnot/planner/ir/ir.h" #include 
"src/carnot/planner/objects/dataframe.h" diff --git a/src/carnot/planner/compiler/test_utils.h b/src/carnot/planner/compiler/test_utils.h index 616fb8593d8..2f65f616b50 100644 --- a/src/carnot/planner/compiler/test_utils.h +++ b/src/carnot/planner/compiler/test_utils.h @@ -768,14 +768,6 @@ class OperatorTests : public ::testing::Test { types::DataType::FLOAT64, types::DataType::FLOAT64}), std::vector({"count", "cpu0", "cpu1", "cpu2"})); } - // Used for testing propagation of context to children. - table_store::schema::Relation MakeRelationWithMutation() { - std::optional mutation = "mutation"; - return table_store::schema::Relation( - std::vector({types::DataType::INT64, types::DataType::FLOAT64, - types::DataType::FLOAT64, types::DataType::FLOAT64}), - std::vector({"count", "cpu0", "cpu1", "cpu2"}), mutation); - } // Same as MakeRelation, but has a time column. table_store::schema::Relation MakeTimeRelation() { return table_store::schema::Relation( diff --git a/src/carnot/planner/distributed/coordinator/coordinator.cc b/src/carnot/planner/distributed/coordinator/coordinator.cc index ef468fbe130..b437bdf8c37 100644 --- a/src/carnot/planner/distributed/coordinator/coordinator.cc +++ b/src/carnot/planner/distributed/coordinator/coordinator.cc @@ -194,15 +194,8 @@ StatusOr> CoordinatorImpl::CoordinateImpl(const remote_carnot->AddPlan(remote_plan); distributed_plan->AddPlan(std::move(remote_plan_uptr)); - auto remote_agent_id = remote_carnot->carnot_info().agent_id(); std::vector source_node_ids; for (const auto& [i, data_store_info] : Enumerate(data_store_nodes_)) { - auto agent_id = data_store_info.agent_id(); - // For cases where the remote agent also has a data store, we don't need to add a source. - // This ensures that the MemorySource will be executed locally without an unnecessary GRPCSink/Source pair. 
- if (agent_id == remote_agent_id) { - continue; - } PX_ASSIGN_OR_RETURN(int64_t source_node_id, distributed_plan->AddCarnot(data_store_info)); distributed_plan->AddEdge(source_node_id, remote_node_id); source_node_ids.push_back(source_node_id); diff --git a/src/carnot/planner/distributed/coordinator/coordinator_test.cc b/src/carnot/planner/distributed/coordinator/coordinator_test.cc index b6466be90fe..e864338b88b 100644 --- a/src/carnot/planner/distributed/coordinator/coordinator_test.cc +++ b/src/carnot/planner/distributed/coordinator/coordinator_test.cc @@ -62,16 +62,6 @@ class CoordinatorTest : public testutils::DistributedRulesTest { ASSERT_OK(rule.Execute(graph.get())); } - void MakeGraphWithMutation() { - auto mem_src = MakeMemSource(MakeRelationWithMutation()); - compiler_state_->relation_map()->emplace("table", MakeRelationWithMutation()); - graph->RecordMutationId({"mutation"}); - MakeMemSink(mem_src, "out"); - - ResolveTypesRule rule(compiler_state_.get()); - ASSERT_OK(rule.Execute(graph.get())); - } - void VerifyHasDataSourcePlan(IR* plan) { auto mem_src_nodes = plan->FindNodesOfType(IRNodeType::kMemorySource); ASSERT_EQ(mem_src_nodes.size(), 1); @@ -154,48 +144,6 @@ TEST_F(CoordinatorTest, three_pems_one_kelvin) { } } -// TODO(ddelnano): Finish this test -TEST_F(CoordinatorTest, three_pems_one_kelvin_with_mut) { - auto ps = LoadDistributedStatePb(kThreePEMsOneKelvinDistributedState); - auto coordinator = Coordinator::Create(compiler_state_.get(), ps).ConsumeValueOrDie(); - - MakeGraphWithMutation(); - auto physical_plan = coordinator->Coordinate(graph.get()).ConsumeValueOrDie(); - - auto topo_sort = physical_plan->dag().TopologicalSort(); - // Last item should be kelvin, id 0. 
- ASSERT_EQ(topo_sort.size(), 4); - ASSERT_EQ(topo_sort[3], 0); - - auto kelvin_instance = physical_plan->Get(0); - EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); - { - SCOPED_TRACE("three pems one kelvin -> " + - kelvin_instance->carnot_info().query_broker_address()); - VerifyKelvinMergerPlan(kelvin_instance->plan()); - } - - // Agents are 1,2,3. - for (int64_t i = 1; i <= 3; ++i) { - auto pem_instance = physical_plan->Get(i); - SCOPED_TRACE("three pems one kelvin -> " + pem_instance->carnot_info().query_broker_address()); - EXPECT_THAT(pem_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); - auto plan = pem_instance->plan(); - VerifyPEMPlan(plan); - - auto grpc_sink = plan->FindNodesOfType(IRNodeType::kGRPCSink); - - EXPECT_EQ(1, grpc_sink.size()); - planpb::Operator op; - auto grpc_sink_ir = static_cast(grpc_sink[0]); - // This unit test doesn't trigger the UpdateSink/AddDestinationIDMap code path, so trigger - // manually so the internal GRPC sink ToProto function works. 
- grpc_sink_ir->AddDestinationIDMap(0, i); - EXPECT_OK(grpc_sink_ir->ToProto(&op, i)); - EXPECT_EQ(1, op.context().size()); - } -} - TEST_F(CoordinatorTest, one_pem_three_kelvin) { auto ps = LoadDistributedStatePb(kOnePEMThreeKelvinsDistributedState); auto coordinator = Coordinator::Create(compiler_state_.get(), ps).ConsumeValueOrDie(); @@ -209,39 +157,14 @@ TEST_F(CoordinatorTest, one_pem_three_kelvin) { auto kelvin_instance = physical_plan->Get(0); EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); { - SCOPED_TRACE("one pem three kelvin -> kelvin plan"); - VerifyKelvinMergerPlan(kelvin_instance->plan()); - } - - auto pem_instance = physical_plan->Get(1); - EXPECT_THAT(pem_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); - { - SCOPED_TRACE("one pem three kelvin -> pem plan"); - VerifyPEMPlan(pem_instance->plan()); - } -} - -TEST_F(CoordinatorTest, three_pem_one_kelvin_all_has_data_store) { - auto ps = LoadDistributedStatePb(testutils::kThreePEMsOneKelvinAllHasDataStoreDistributedState); - auto coordinator = Coordinator::Create(compiler_state_.get(), ps).ConsumeValueOrDie(); - - MakeGraph(); - - auto physical_plan = coordinator->Coordinate(graph.get()).ConsumeValueOrDie(); - ASSERT_EQ(physical_plan->dag().nodes().size(), 5UL); - /* EXPECT_THAT(physical_plan->dag().TopologicalSort(), ElementsAre(3, 1, 2, 4, 0)); */ - - auto kelvin_instance = physical_plan->Get(0); - EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); - { - SCOPED_TRACE("one pem three kelvin -> kelvin plan"); + SCOPED_TRACE("one pem one kelvin -> kelvin plan"); VerifyKelvinMergerPlan(kelvin_instance->plan()); } auto pem_instance = physical_plan->Get(1); EXPECT_THAT(pem_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); { - SCOPED_TRACE("one pem three kelvin -> pem plan"); + SCOPED_TRACE("one pem one kelvin -> pem plan"); VerifyPEMPlan(pem_instance->plan()); } } diff --git 
a/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc b/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc index 3b5b2f85dc1..1af0e858da8 100644 --- a/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc +++ b/src/carnot/planner/distributed/coordinator/prune_unavailable_sources_rule.cc @@ -73,7 +73,8 @@ StatusOr PruneUnavailableSourcesRule::MaybePruneMemorySource(MemorySourceI } bool PruneUnavailableSourcesRule::AgentSupportsMemorySources() { - return carnot_info_.has_data_store() && carnot_info_.processes_data(); + return carnot_info_.has_data_store() && !carnot_info_.has_grpc_server() && + carnot_info_.processes_data(); } bool PruneUnavailableSourcesRule::AgentHasTable(std::string table_name) { diff --git a/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc b/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc index 2226005fabe..7fe66c7da83 100644 --- a/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc +++ b/src/carnot/planner/distributed/distributed_plan/distributed_plan.cc @@ -50,10 +50,6 @@ StatusOr DistributedPlan::ToProto() const { dest->set_grpc_address(exec_complete_address_); dest->set_ssl_targetname(exec_complete_ssl_targetname_); } - if (qb_address_to_plan_pb->find(carnot->QueryBrokerAddress()) != - qb_address_to_plan_pb->end()) { - return error::Internal(absl::Substitute("Distributed plan has multiple nodes with the '$0' query broker address.", carnot->QueryBrokerAddress())); - } (*qb_address_to_plan_pb)[carnot->QueryBrokerAddress()] = plan_proto; (*qb_address_to_dag_id_pb)[carnot->QueryBrokerAddress()] = i; diff --git a/src/carnot/planner/distributed/distributed_planner_test.cc b/src/carnot/planner/distributed/distributed_planner_test.cc index fa4b0a8d0b7..28fee3533a3 100644 --- a/src/carnot/planner/distributed/distributed_planner_test.cc +++ b/src/carnot/planner/distributed/distributed_planner_test.cc @@ -213,78 +213,6 @@ 
TEST_F(DistributedPlannerTest, three_agents_one_kelvin) { EXPECT_THAT(grpc_sink_destinations, UnorderedElementsAreArray(grpc_source_ids)); } -TEST_F(DistributedPlannerTest, three_agents_with_participating_kelvin) { - auto mem_src = MakeMemSource(MakeRelation()); - compiler_state_->relation_map()->emplace("table", MakeRelation()); - MakeMemSink(mem_src, "out"); - - ResolveTypesRule rule(compiler_state_.get()); - ASSERT_OK(rule.Execute(graph.get())); - - distributedpb::DistributedState ps_pb = - LoadDistributedStatePb(testutils::kThreePEMsOneKelvinAllHasDataStoreDistributedState); - std::unique_ptr physical_planner = - DistributedPlanner::Create().ConsumeValueOrDie(); - std::unique_ptr physical_plan = - physical_planner->Plan(ps_pb, compiler_state_.get(), graph.get()).ConsumeValueOrDie(); - - ASSERT_OK(physical_plan->ToProto()); - auto topo_sort = physical_plan->dag().TopologicalSort(); - // Last item should be kelvin, id 0. - ASSERT_EQ(topo_sort.size(), 4); - ASSERT_EQ(topo_sort[3], 0); - - std::vector grpc_sink_destinations; - absl::flat_hash_set seen_plans; - for (int64_t i = 1; i <= 3; ++i) { - SCOPED_TRACE(absl::Substitute("agent id = $0", i)); - auto agent_instance = physical_plan->Get(i); - if (i != 4) { - EXPECT_THAT(agent_instance->carnot_info().query_broker_address(), ContainsRegex("pem")); - } else { - EXPECT_THAT(agent_instance->carnot_info().query_broker_address(), ContainsRegex("kelvin")); - } - - if (seen_plans.contains(agent_instance->plan())) { - continue; - } - - seen_plans.insert(agent_instance->plan()); - std::vector grpc_sinks = - agent_instance->plan()->FindNodesOfType(IRNodeType::kGRPCSink); - ASSERT_EQ(grpc_sinks.size(), 1); - auto grpc_sink = static_cast(grpc_sinks[0]); - for (const auto& [agent_id, dest_id] : grpc_sink->agent_id_to_destination_id()) { - grpc_sink_destinations.push_back(dest_id); - } - } - - auto kelvin_instance = physical_plan->Get(0); - EXPECT_THAT(kelvin_instance->carnot_info().query_broker_address(), 
ContainsRegex("kelvin")); - - std::vector unions = kelvin_instance->plan()->FindNodesOfType(IRNodeType::kUnion); - ASSERT_EQ(unions.size(), 1); - UnionIR* kelvin_union = static_cast(unions[0]); - ASSERT_EQ(kelvin_union->parents().size(), 4); - - std::vector grpc_source_ids; - std::vector memory_source_ids; - for (OperatorIR* union_parent : kelvin_union->parents()) { - if (union_parent->type() == IRNodeType::kGRPCSource) { - auto grpc_source = static_cast(union_parent); - grpc_source_ids.push_back(grpc_source->id()); - } else { - ASSERT_EQ(union_parent->type(), IRNodeType::kMemorySource); - memory_source_ids.push_back(union_parent->id()); - } - } - ASSERT_EQ(grpc_source_ids.size(), 3); - ASSERT_EQ(memory_source_ids.size(), 1); - - // Make sure that the destinations are setup properly. - EXPECT_THAT(grpc_sink_destinations, UnorderedElementsAreArray(grpc_source_ids)); -} - using DistributedPlannerUDTFTests = DistributedRulesTest; TEST_F(DistributedPlannerUDTFTests, UDTFOnlyOnPEMsDoesntRunOnKelvin) { uint32_t asid = 123; diff --git a/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc b/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc index 34962fb8c9b..49879679256 100644 --- a/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc +++ b/src/carnot/planner/distributed/distributed_stitcher_rules_test.cc @@ -298,64 +298,6 @@ TEST_F(StitcherTest, three_pems_one_kelvin) { } } -TEST_F(StitcherTest, three_pems_with_participating_kelvin) { - auto ps = LoadDistributedStatePb(testutils::kThreePEMsOneKelvinAllHasDataStoreDistributedState); - auto physical_plan = MakeDistributedPlan(ps); - auto topo_sort = physical_plan->dag().TopologicalSort(); - ASSERT_EQ(topo_sort.size(), 5); - ASSERT_EQ(topo_sort[4], 0); - - CarnotInstance* kelvin = physical_plan->Get(0); - std::string kelvin_qb_address = "kelvin"; - ASSERT_EQ(kelvin->carnot_info().query_broker_address(), kelvin_qb_address); - - std::vector data_sources; - for (int64_t agent_id = 1; 
agent_id <= 4; ++agent_id) { - CarnotInstance* agent = physical_plan->Get(agent_id); - // Quick check to make sure agents are valid. - ASSERT_THAT(agent->carnot_info().query_broker_address(), HasSubstr("pem")); - data_sources.push_back(agent); - } - // Kelvin can be a data source sometimes. - data_sources.push_back(kelvin); - { - SCOPED_TRACE("three_pems_with_participating_kelvin"); - TestBeforeSetSourceGroupGRPCAddress(data_sources, {kelvin}); - } - - // Execute the address rule. - DistributedSetSourceGroupGRPCAddressRule rule; - auto node_changed_or_s = rule.Execute(physical_plan.get()); - ASSERT_OK(node_changed_or_s); - ASSERT_TRUE(node_changed_or_s.ConsumeValueOrDie()); - - { - SCOPED_TRACE("three_pems_with_participating_kelvin"); - TestGRPCAddressSet({kelvin}); - } - - // Associate the edges of the graph. - AssociateDistributedPlanEdgesRule distributed_edges_rule; - node_changed_or_s = distributed_edges_rule.Execute(physical_plan.get()); - ASSERT_OK(node_changed_or_s); - ASSERT_TRUE(node_changed_or_s.ConsumeValueOrDie()); - - { - SCOPED_TRACE("three_pems_with_participating_kelvin"); - TestGRPCBridgesWiring(data_sources, {kelvin}); - } - - DistributedIRRule distributed_grpc_source_conv_rule; - node_changed_or_s = distributed_grpc_source_conv_rule.Execute(physical_plan.get()); - ASSERT_OK(node_changed_or_s); - ASSERT_TRUE(node_changed_or_s.ConsumeValueOrDie()); - - { - SCOPED_TRACE("three_pems_with_participating_kelvin"); - TestGRPCBridgesExpandedCorrectly(data_sources, {kelvin}); - } -} - // Test to see whether we can stitch a graph to itself. TEST_F(StitcherTest, stitch_self_together_with_udtf) { auto ps = LoadDistributedStatePb(kOnePEMOneKelvinDistributedState); @@ -397,7 +339,7 @@ TEST_F(StitcherTest, stitch_self_together_with_udtf) { } // Test to see whether we can stitch a graph to itself. 
-TEST_F(StitcherTest, stitch_all_together_with_udtf) { +TEST_F(StitcherTest, stitch_all_togther_with_udtf) { auto ps = LoadDistributedStatePb(kOnePEMOneKelvinDistributedState); // px._Test_MDState() is an all agent so it should run on every pem and kelvin. auto physical_plan = CoordinateQuery("import px\npx.display(px._Test_MD_State())", ps); @@ -439,8 +381,6 @@ TEST_F(StitcherTest, stitch_all_together_with_udtf) { // connected. auto kelvin_plan = kelvin->plan(); auto pem_plan = pem->plan(); - LOG(INFO) << "Kelvin plan: " << kelvin_plan->DebugString(); - LOG(INFO) << "PEM plan: " << pem_plan->DebugString(); auto kelvin_grpc_sinks = kelvin_plan->FindNodesThatMatch(InternalGRPCSink()); ASSERT_EQ(kelvin_grpc_sinks.size(), 1); diff --git a/src/carnot/planner/distributedpb/distributed_plan.pb.go b/src/carnot/planner/distributedpb/distributed_plan.pb.go index 64787d1782a..c285696167d 100755 --- a/src/carnot/planner/distributedpb/distributed_plan.pb.go +++ b/src/carnot/planner/distributedpb/distributed_plan.pb.go @@ -581,6 +581,89 @@ func (m *OTelEndpointConfig) GetTimeout() int64 { return 0 } +type ClickHouseConfig struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"` + Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"` +} + +func (m *ClickHouseConfig) Reset() { *m = ClickHouseConfig{} } +func (*ClickHouseConfig) ProtoMessage() {} +func (*ClickHouseConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_30dce4250507a2af, []int{8} +} +func (m *ClickHouseConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClickHouseConfig) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClickHouseConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClickHouseConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickHouseConfig.Merge(m, src) +} +func (m *ClickHouseConfig) XXX_Size() int { + return m.Size() +} +func (m *ClickHouseConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClickHouseConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickHouseConfig proto.InternalMessageInfo + +func (m *ClickHouseConfig) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *ClickHouseConfig) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ClickHouseConfig) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickHouseConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *ClickHouseConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *ClickHouseConfig) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + type PluginConfig struct { StartTimeNs int64 `protobuf:"varint,1,opt,name=start_time_ns,json=startTimeNs,proto3" json:"start_time_ns,omitempty"` EndTimeNs int64 `protobuf:"varint,2,opt,name=end_time_ns,json=endTimeNs,proto3" json:"end_time_ns,omitempty"` @@ -589,7 +672,7 @@ type PluginConfig struct { func (m *PluginConfig) Reset() { *m = PluginConfig{} } func (*PluginConfig) ProtoMessage() {} func (*PluginConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_30dce4250507a2af, []int{8} + return fileDescriptor_30dce4250507a2af, []int{9} } func (m *PluginConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +722,7 @@ type DebugInfo struct { func (m *DebugInfo) Reset() { *m = DebugInfo{} } func 
(*DebugInfo) ProtoMessage() {} func (*DebugInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_30dce4250507a2af, []int{9} + return fileDescriptor_30dce4250507a2af, []int{10} } func (m *DebugInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -683,7 +766,7 @@ type DebugInfo_OTelDebugAttribute struct { func (m *DebugInfo_OTelDebugAttribute) Reset() { *m = DebugInfo_OTelDebugAttribute{} } func (*DebugInfo_OTelDebugAttribute) ProtoMessage() {} func (*DebugInfo_OTelDebugAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_30dce4250507a2af, []int{9, 0} + return fileDescriptor_30dce4250507a2af, []int{10, 0} } func (m *DebugInfo_OTelDebugAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -734,13 +817,14 @@ type LogicalPlannerState struct { RedactionOptions *RedactionOptions `protobuf:"bytes,7,opt,name=redaction_options,json=redactionOptions,proto3" json:"redaction_options,omitempty"` OTelEndpointConfig *OTelEndpointConfig `protobuf:"bytes,8,opt,name=otel_endpoint_config,json=otelEndpointConfig,proto3" json:"otel_endpoint_config,omitempty"` PluginConfig *PluginConfig `protobuf:"bytes,9,opt,name=plugin_config,json=pluginConfig,proto3" json:"plugin_config,omitempty"` + ClickhouseConfig *ClickHouseConfig `protobuf:"bytes,11,opt,name=clickhouse_config,json=clickhouseConfig,proto3" json:"clickhouse_config,omitempty"` DebugInfo *DebugInfo `protobuf:"bytes,10,opt,name=debug_info,json=debugInfo,proto3" json:"debug_info,omitempty"` } func (m *LogicalPlannerState) Reset() { *m = LogicalPlannerState{} } func (*LogicalPlannerState) ProtoMessage() {} func (*LogicalPlannerState) Descriptor() ([]byte, []int) { - return fileDescriptor_30dce4250507a2af, []int{10} + return fileDescriptor_30dce4250507a2af, []int{11} } func (m *LogicalPlannerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -818,6 +902,13 @@ func (m *LogicalPlannerState) GetPluginConfig() *PluginConfig { return nil } +func (m *LogicalPlannerState) 
GetClickhouseConfig() *ClickHouseConfig { + if m != nil { + return m.ClickhouseConfig + } + return nil +} + func (m *LogicalPlannerState) GetDebugInfo() *DebugInfo { if m != nil { return m.DebugInfo @@ -833,7 +924,7 @@ type LogicalPlannerResult struct { func (m *LogicalPlannerResult) Reset() { *m = LogicalPlannerResult{} } func (*LogicalPlannerResult) ProtoMessage() {} func (*LogicalPlannerResult) Descriptor() ([]byte, []int) { - return fileDescriptor_30dce4250507a2af, []int{11} + return fileDescriptor_30dce4250507a2af, []int{12} } func (m *LogicalPlannerResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -888,6 +979,7 @@ func init() { proto.RegisterType((*RedactionOptions)(nil), "px.carnot.planner.distributedpb.RedactionOptions") proto.RegisterType((*OTelEndpointConfig)(nil), "px.carnot.planner.distributedpb.OTelEndpointConfig") proto.RegisterMapType((map[string]string)(nil), "px.carnot.planner.distributedpb.OTelEndpointConfig.HeadersEntry") + proto.RegisterType((*ClickHouseConfig)(nil), "px.carnot.planner.distributedpb.ClickHouseConfig") proto.RegisterType((*PluginConfig)(nil), "px.carnot.planner.distributedpb.PluginConfig") proto.RegisterType((*DebugInfo)(nil), "px.carnot.planner.distributedpb.DebugInfo") proto.RegisterType((*DebugInfo_OTelDebugAttribute)(nil), "px.carnot.planner.distributedpb.DebugInfo.OTelDebugAttribute") @@ -900,104 +992,111 @@ func init() { } var fileDescriptor_30dce4250507a2af = []byte{ - // 1549 bytes of a gzipped FileDescriptorProto + // 1651 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x8a, 0x94, 0x48, 0x3e, 0x92, 0x12, 0x3d, 0xa2, 0x5c, 0x96, 0x48, 0x48, 0x97, 0x48, - 0x50, 0xc1, 0x76, 0x97, 0xa9, 0x12, 0x34, 0x69, 0x80, 0xb4, 0x11, 0x45, 0xc9, 0x62, 0xac, 0x26, - 0xea, 0x50, 0x06, 0x02, 0x1f, 0xba, 0x18, 0x72, 0x87, 0xe4, 0x22, 0xcb, 0xdd, 0xd5, 0xce, 0xac, - 0x21, 0xb5, 0x28, 0xd0, 0x1e, 0x7b, 0x6a, 0x3f, 0x46, 0x4f, 
0xbd, 0xf5, 0xda, 0x6b, 0x7b, 0xf4, - 0x31, 0x27, 0x21, 0xa6, 0x2f, 0x3d, 0xe6, 0x0b, 0x14, 0x28, 0xe6, 0xcd, 0x2e, 0xb5, 0xa4, 0x09, - 0x48, 0x6e, 0x2f, 0xe4, 0xcc, 0x7b, 0xbf, 0xf7, 0x67, 0xe6, 0xbd, 0xdf, 0xcc, 0x2c, 0x7c, 0x2c, - 0xc2, 0x61, 0x7b, 0xc8, 0x42, 0xcf, 0x97, 0xed, 0xc0, 0x65, 0x9e, 0xc7, 0xc3, 0xb6, 0xed, 0x08, - 0x19, 0x3a, 0x83, 0x48, 0x72, 0x3b, 0x18, 0xa4, 0x67, 0x96, 0x42, 0x98, 0x41, 0xe8, 0x4b, 0x9f, - 0x34, 0x83, 0x4b, 0x53, 0xdb, 0x99, 0xb1, 0x9d, 0xb9, 0x60, 0x57, 0xaf, 0x8e, 0xfd, 0xb1, 0x8f, - 0xd8, 0xb6, 0x1a, 0x69, 0xb3, 0x7a, 0x53, 0xc5, 0x63, 0x81, 0xd3, 0xd6, 0x9a, 0x28, 0x72, 0x54, - 0x0c, 0xf5, 0x17, 0x03, 0xde, 0x59, 0x4a, 0x28, 0x18, 0xb4, 0x6f, 0xa2, 0xd6, 0xdf, 0x47, 0xad, - 0x3f, 0x9d, 0xfa, 0x5e, 0x7b, 0xc0, 0x04, 0x6f, 0x0b, 0xc9, 0x64, 0x24, 0x82, 0x41, 0x3c, 0x88, - 0x61, 0x0f, 0x15, 0x4c, 0x4c, 0x58, 0xc8, 0xed, 0xf6, 0xc0, 0xf5, 0xfd, 0xe9, 0xc8, 0x71, 0x25, - 0x0f, 0x83, 0x41, 0x7a, 0x16, 0x63, 0xdf, 0x4b, 0x61, 0xa7, 0x5c, 0x32, 0x9b, 0x49, 0x16, 0x0c, - 0xe6, 0xc3, 0x74, 0x60, 0xc9, 0x06, 0x2e, 0xb7, 0x84, 0xf4, 0x43, 0xde, 0x16, 0xc3, 0x09, 0x9f, - 0x2a, 0xa0, 0x1e, 0x68, 0x58, 0x6b, 0x66, 0x40, 0xe9, 0x57, 0xb1, 0x65, 0xcf, 0x1b, 0xf9, 0xe4, - 0x29, 0x6c, 0x27, 0x9e, 0xac, 0x91, 0xc3, 0x5d, 0x5b, 0xd4, 0x8c, 0x07, 0x99, 0xbd, 0xad, 0xfd, - 0x96, 0x19, 0x5c, 0x9a, 0x3a, 0xac, 0x79, 0x13, 0xd6, 0x4c, 0x8c, 0xcf, 0xaf, 0x02, 0x4e, 0xb7, - 0x12, 0xc5, 0x31, 0x5a, 0x92, 0xdf, 0xc1, 0xee, 0xe5, 0xe5, 0x84, 0x89, 0xc9, 0xcf, 0x3e, 0xb2, - 0x70, 0x21, 0x96, 0x5e, 0x49, 0x6d, 0xfd, 0x81, 0xb1, 0x57, 0xdc, 0x7f, 0x9c, 0x72, 0xb9, 0xb0, - 0x6a, 0xf3, 0xeb, 0xaf, 0x4f, 0xd0, 0xaa, 0xa3, 0xa4, 0xc7, 0x28, 0xed, 0xfc, 0x60, 0x76, 0xdd, - 0xdc, 0x59, 0xa1, 0x38, 0x59, 0xa3, 0x3b, 0x49, 0x94, 0x34, 0x3e, 0x0f, 0x9b, 0xda, 0x5f, 0xeb, - 0xbb, 0x2c, 0xc0, 0x21, 0x56, 0x08, 0x97, 0xf8, 0x01, 0x54, 0x2f, 0x22, 0x1e, 0x5e, 0x59, 0x83, - 0xd0, 0xff, 0x86, 0x87, 0x16, 0xb3, 0xed, 0x90, 0x0b, 0xb5, 0x4e, 0x63, 0xaf, 0x40, 0x09, 0xea, - 
0x3a, 0xa8, 0x3a, 0xd0, 0x1a, 0xf2, 0x31, 0xe4, 0xd9, 0x98, 0x7b, 0xd2, 0x72, 0xec, 0x1a, 0x60, - 0xea, 0xdb, 0x2a, 0x75, 0xdd, 0x0c, 0xe6, 0xb3, 0x67, 0xbd, 0x6e, 0xa7, 0x38, 0xbb, 0x6e, 0xe6, - 0x0e, 0x14, 0xa8, 0xd7, 0xa5, 0x39, 0x44, 0xf7, 0x6c, 0xf2, 0x73, 0xd8, 0x9e, 0x30, 0x61, 0x8d, - 0xc3, 0x60, 0x68, 0x09, 0x1e, 0xbe, 0x88, 0x97, 0x9e, 0xef, 0xdc, 0x9b, 0x5d, 0x37, 0xcb, 0x27, - 0x4c, 0x3c, 0xa1, 0x67, 0x87, 0x7d, 0x54, 0xd0, 0xf2, 0x84, 0x89, 0x27, 0x61, 0x30, 0xd4, 0x53, - 0xb2, 0x0f, 0x25, 0x34, 0x4b, 0xb2, 0xcb, 0xa8, 0xec, 0x3a, 0xdb, 0xb3, 0xeb, 0x66, 0x51, 0x19, - 0xc5, 0xa9, 0xd1, 0xa2, 0x02, 0x25, 0x79, 0xbe, 0x07, 0x5b, 0x2a, 0x1c, 0x16, 0x0f, 0xab, 0x5e, - 0xcb, 0xaa, 0x68, 0xb4, 0x34, 0x61, 0xa2, 0xcb, 0x24, 0xeb, 0x2b, 0x19, 0x79, 0x1f, 0xb6, 0x82, - 0xd0, 0x1f, 0x72, 0x21, 0xb8, 0xc6, 0xd6, 0x36, 0x10, 0x55, 0x9e, 0x4b, 0x15, 0x96, 0x7c, 0x04, - 0xf7, 0xd9, 0x70, 0xc8, 0x03, 0x29, 0xac, 0x90, 0x4f, 0x7d, 0xc9, 0x2d, 0xe1, 0x47, 0xe1, 0x90, - 0x8b, 0xda, 0x26, 0xc2, 0xab, 0xb1, 0x96, 0xa2, 0xb2, 0xaf, 0x75, 0xa4, 0x07, 0xa0, 0xbb, 0xce, - 0xf1, 0x46, 0x7e, 0x2d, 0xf7, 0x20, 0xb3, 0x57, 0xdc, 0x7f, 0x68, 0xde, 0xc2, 0x3d, 0xf3, 0x5c, - 0x99, 0xa8, 0xe2, 0xd0, 0x82, 0x4c, 0x86, 0xe4, 0x1d, 0xc8, 0x32, 0xe1, 0xd8, 0xb5, 0xfc, 0x03, - 0x63, 0xaf, 0xdc, 0xc9, 0xcf, 0xae, 0x9b, 0xd9, 0x83, 0x7e, 0xaf, 0x4b, 0x51, 0x4a, 0x28, 0x94, - 0xe7, 0x8d, 0x8a, 0xb1, 0x0a, 0x58, 0x98, 0x9f, 0xdc, 0x1a, 0x2b, 0xdd, 0xee, 0xb4, 0x34, 0x4d, - 0x37, 0xff, 0x27, 0xb0, 0x25, 0x84, 0x6b, 0x49, 0x16, 0x8e, 0xb9, 0xf4, 0xd8, 0x94, 0xd7, 0x8a, - 0xb8, 0xeb, 0x58, 0xad, 0x7e, 0xff, 0xf4, 0x1c, 0x15, 0x5f, 0xb2, 0x29, 0xa7, 0x65, 0x21, 0xdc, - 0xf3, 0x39, 0xae, 0x35, 0x81, 0xc2, 0x7c, 0x0d, 0xa4, 0x0a, 0x1b, 0xb8, 0x8a, 0xb8, 0xa3, 0xf4, - 0x84, 0x3c, 0x82, 0x7b, 0x38, 0x90, 0xce, 0x6f, 0x99, 0x74, 0x7c, 0xcf, 0xfa, 0x86, 0x5f, 0x61, - 0x37, 0x14, 0x68, 0x65, 0x41, 0xf1, 0x94, 0x5f, 0x91, 0x1a, 0xe4, 0xb4, 0x4c, 0x15, 0x3e, 0xb3, - 0x57, 0xa0, 0xc9, 0xb4, 0xf5, 0x67, 
0x03, 0xa0, 0x8f, 0x14, 0xc6, 0x58, 0x04, 0xb2, 0x98, 0xa8, - 0x0e, 0x85, 0x63, 0xf2, 0x19, 0xe4, 0x43, 0xee, 0xa2, 0xaf, 0x98, 0x69, 0x3f, 0x52, 0xbb, 0x92, - 0x3a, 0x0d, 0xcc, 0xe4, 0x34, 0x30, 0x69, 0x0c, 0xa4, 0x73, 0x13, 0x62, 0x02, 0xe8, 0x6e, 0x77, - 0x1d, 0x21, 0x31, 0xfc, 0x9b, 0xfd, 0x4e, 0x0b, 0x08, 0x39, 0x75, 0x84, 0x6c, 0xfd, 0xcd, 0x80, - 0x4a, 0xf7, 0x66, 0x8b, 0xfb, 0x92, 0x49, 0x4e, 0x4e, 0xa1, 0xa8, 0xab, 0xa0, 0x8b, 0x63, 0xa0, - 0x97, 0x47, 0xb7, 0x16, 0xe7, 0x86, 0xa6, 0x14, 0x86, 0x37, 0x94, 0x3d, 0x85, 0xa2, 0xce, 0x58, - 0x7b, 0x5b, 0xbf, 0xa3, 0xb7, 0x9b, 0x7d, 0xa2, 0x20, 0xe6, 0xe3, 0xd6, 0x3f, 0x33, 0xb0, 0x9d, - 0x4a, 0xf8, 0xcc, 0x65, 0x1e, 0x09, 0x81, 0x5c, 0x0c, 0x12, 0xb2, 0x59, 0xd2, 0xc7, 0xab, 0x23, - 0x4e, 0xfb, 0xe8, 0xd6, 0x40, 0x4b, 0xde, 0xcc, 0x5f, 0x0f, 0x62, 0x4a, 0x9e, 0xfb, 0x6a, 0x7e, - 0xe4, 0xc9, 0xf0, 0x8a, 0x6e, 0x5f, 0x2c, 0x4a, 0xc9, 0x0b, 0xa8, 0x2e, 0xc6, 0xb4, 0xd9, 0x58, - 0x1d, 0x31, 0x7a, 0x79, 0xc7, 0xff, 0x4f, 0xd4, 0x2e, 0x1b, 0xf7, 0x6c, 0x1d, 0xb6, 0x72, 0xb1, - 0x24, 0x26, 0x3f, 0x86, 0x8c, 0xcd, 0xc6, 0x78, 0xa2, 0x14, 0xf7, 0x77, 0x97, 0xc2, 0x28, 0xbf, - 0x07, 0x4f, 0xa8, 0x42, 0xd4, 0x9f, 0x43, 0x75, 0xd5, 0x4a, 0x48, 0x05, 0x32, 0xaa, 0x79, 0x75, - 0xcf, 0xa9, 0x21, 0x79, 0x0c, 0x1b, 0x2f, 0x98, 0x1b, 0xf1, 0xb8, 0xdf, 0xee, 0xbf, 0xe9, 0x54, - 0x59, 0x53, 0x0d, 0xfa, 0x74, 0xfd, 0x13, 0xa3, 0x7e, 0x08, 0xbb, 0x2b, 0xf3, 0x5d, 0xe1, 0xbc, - 0x9a, 0x76, 0x9e, 0x4d, 0x39, 0x69, 0xfd, 0xd1, 0x80, 0x0a, 0xe5, 0x36, 0x1b, 0xaa, 0xc6, 0xfd, - 0x2a, 0x50, 0xbf, 0x82, 0x3c, 0x06, 0x12, 0x09, 0x6e, 0x8d, 0x22, 0xd7, 0xb5, 0xc2, 0x44, 0x89, - 0xfe, 0xf2, 0xb4, 0x12, 0x09, 0x7e, 0x1c, 0xb9, 0xee, 0xdc, 0x88, 0xfc, 0x12, 0xde, 0x55, 0xe8, - 0xe0, 0x32, 0xc6, 0x5a, 0x81, 0xe3, 0x58, 0x03, 0x2e, 0xa4, 0xc5, 0x47, 0x23, 0x3f, 0x94, 0xfa, - 0xc0, 0xa6, 0xb5, 0x48, 0xf0, 0xb3, 0x4b, 0x6d, 0x76, 0xe6, 0x38, 0x1d, 0x2e, 0xe4, 0x11, 0xea, - 0x5b, 0xff, 0x31, 0x80, 0x7c, 0x75, 0xce, 0xdd, 0x23, 0xcf, 0x0e, 0x7c, 
0xc7, 0x93, 0x87, 0xbe, - 0x37, 0x72, 0xc6, 0xe4, 0x87, 0x90, 0x89, 0x42, 0x57, 0x2f, 0xa3, 0x93, 0x9b, 0x5d, 0x37, 0x33, - 0xcf, 0xe8, 0x29, 0x55, 0x32, 0xf2, 0x1c, 0x72, 0x13, 0xce, 0x6c, 0x1e, 0x8a, 0xb8, 0xd4, 0x9f, - 0xdf, 0x5a, 0xea, 0x37, 0x03, 0x98, 0x27, 0xda, 0x85, 0x2e, 0x72, 0xe2, 0x90, 0xd4, 0x21, 0xef, - 0x78, 0x82, 0x0f, 0xa3, 0x90, 0x63, 0x81, 0xf3, 0x74, 0x3e, 0xc7, 0x43, 0xc5, 0x99, 0x72, 0x3f, - 0x92, 0x78, 0x2f, 0x64, 0x68, 0x32, 0xad, 0x7f, 0x0a, 0xa5, 0xb4, 0xbb, 0xdb, 0x6a, 0x50, 0x48, - 0xd7, 0x80, 0x42, 0xe9, 0xcc, 0x8d, 0xc6, 0x8e, 0x17, 0x2f, 0xbc, 0x05, 0x65, 0x21, 0x59, 0x28, - 0x2d, 0xe5, 0xdc, 0xf2, 0xf4, 0xbd, 0x9a, 0xa1, 0x45, 0x14, 0x9e, 0x3b, 0x53, 0xfe, 0xa5, 0x20, - 0x0d, 0x28, 0x72, 0xcf, 0x9e, 0x23, 0xd6, 0x11, 0x51, 0xe0, 0x9e, 0xad, 0xf5, 0xad, 0x7f, 0x18, - 0x50, 0xe8, 0xf2, 0x41, 0x34, 0x46, 0xf6, 0x5f, 0xc0, 0xae, 0x2f, 0xb9, 0x6b, 0xd9, 0x4a, 0x62, - 0x31, 0x19, 0xef, 0x8b, 0x88, 0xe9, 0xf9, 0xd9, 0xed, 0x44, 0x49, 0x5c, 0xe1, 0x3e, 0xe2, 0xec, - 0x20, 0xf1, 0x42, 0x77, 0x94, 0xef, 0x45, 0x99, 0xa8, 0xff, 0x42, 0xd7, 0x74, 0x51, 0xbc, 0xf2, - 0xb0, 0x5d, 0xb9, 0x31, 0xad, 0xbf, 0x6f, 0xc0, 0xce, 0xa9, 0x3f, 0x76, 0x86, 0xcc, 0x3d, 0xd3, - 0x29, 0xe9, 0x63, 0xf1, 0x37, 0x70, 0x2f, 0xfd, 0x3e, 0x55, 0x8f, 0xc0, 0x84, 0x33, 0x3f, 0x7d, - 0x1b, 0xbe, 0xa3, 0x37, 0x5a, 0xb1, 0x97, 0x8f, 0xdd, 0xcf, 0xa1, 0xa4, 0x6c, 0x2d, 0x5f, 0x73, - 0x21, 0xe6, 0xf8, 0xbb, 0xab, 0xe9, 0x18, 0x13, 0x86, 0x16, 0x83, 0x9b, 0x89, 0x7a, 0x1d, 0x84, - 0x5c, 0x44, 0xae, 0x9c, 0xbf, 0x3c, 0xb2, 0xb8, 0xb0, 0xb2, 0x96, 0x26, 0x4f, 0x8d, 0xa7, 0xb0, - 0x1b, 0xc3, 0x96, 0x6e, 0xcc, 0x0d, 0x6c, 0x78, 0x7c, 0xac, 0x51, 0x04, 0x2c, 0xde, 0x9b, 0x3b, - 0xda, 0xaa, 0x9f, 0xbe, 0x3d, 0xd5, 0xae, 0xcc, 0x89, 0x3a, 0x4f, 0x3d, 0x77, 0xc7, 0x5d, 0x59, - 0xe6, 0x3f, 0xad, 0x84, 0xcb, 0x27, 0xc2, 0xef, 0xa1, 0x8a, 0x0d, 0xc4, 0x63, 0x06, 0x59, 0x43, - 0x6c, 0x55, 0x7c, 0x59, 0x14, 0xf7, 0x3f, 0xfc, 0x1f, 0xd8, 0xd7, 0xb9, 0x3f, 0xbb, 0x6e, 0xae, - 0xa0, 0x3d, 
0x25, 0x2a, 0xd0, 0xd2, 0x51, 0x40, 0xa1, 0x1c, 0x20, 0x43, 0x92, 0xb8, 0x77, 0x7d, - 0xaa, 0xa4, 0x79, 0x45, 0x4b, 0x41, 0x9a, 0x65, 0x3d, 0x00, 0x4d, 0x07, 0xbc, 0x10, 0xf5, 0xa3, - 0xf4, 0xe1, 0xdd, 0x89, 0x40, 0x0b, 0x76, 0x32, 0xfc, 0x22, 0x9b, 0x37, 0x2a, 0xeb, 0x5f, 0x64, - 0xf3, 0x9b, 0x95, 0x5c, 0xeb, 0x4f, 0x06, 0x54, 0x17, 0xfb, 0x56, 0x17, 0x91, 0x3c, 0x82, 0x4d, - 0xfd, 0xc5, 0x82, 0xcd, 0x5f, 0xdc, 0xdf, 0xc1, 0xb7, 0x7b, 0xfc, 0x31, 0x63, 0xf6, 0x71, 0x40, - 0x63, 0x08, 0xe9, 0x42, 0x16, 0xaf, 0x4f, 0xdd, 0xd8, 0x1f, 0xbc, 0xed, 0x45, 0x46, 0xd1, 0xba, - 0x73, 0xf8, 0xf2, 0x55, 0x63, 0xed, 0xdb, 0x57, 0x8d, 0xb5, 0xef, 0x5f, 0x35, 0x8c, 0x3f, 0xcc, - 0x1a, 0xc6, 0x5f, 0x67, 0x0d, 0xe3, 0x5f, 0xb3, 0x86, 0xf1, 0x72, 0xd6, 0x30, 0xbe, 0x9b, 0x35, - 0x8c, 0x7f, 0xcf, 0x1a, 0x6b, 0xdf, 0xcf, 0x1a, 0xc6, 0x5f, 0x5e, 0x37, 0xd6, 0x5e, 0xbe, 0x6e, - 0xac, 0x7d, 0xfb, 0xba, 0xb1, 0xf6, 0xbc, 0xbc, 0xe0, 0x7a, 0xb0, 0x89, 0xdf, 0x39, 0x1f, 0xfe, - 0x37, 0x00, 0x00, 0xff, 0xff, 0x01, 0xc0, 0xd4, 0xed, 0x38, 0x0e, 0x00, 0x00, + 0x15, 0xd7, 0x8a, 0xb4, 0x44, 0x3e, 0x8a, 0x12, 0x3d, 0xa2, 0x5c, 0x96, 0x48, 0x48, 0x97, 0x48, + 0x50, 0xc1, 0x76, 0x97, 0xa9, 0x12, 0x34, 0x69, 0x80, 0xb4, 0x11, 0x25, 0xdb, 0x52, 0xac, 0x26, + 0xea, 0x50, 0x06, 0x02, 0x1f, 0xb2, 0x18, 0x72, 0x47, 0xe4, 0xc2, 0xcb, 0xdd, 0xd5, 0xcc, 0xac, + 0x2b, 0xb5, 0x28, 0xd0, 0x1e, 0x7b, 0x6a, 0x2f, 0xfd, 0x0e, 0x45, 0x0f, 0xfd, 0x08, 0xbd, 0xb6, + 0x47, 0x1f, 0x73, 0x12, 0x62, 0xfa, 0xd2, 0x63, 0xbe, 0x40, 0x81, 0x62, 0xde, 0xec, 0xae, 0x96, + 0x34, 0x01, 0x29, 0xcd, 0x45, 0x9a, 0x79, 0xef, 0xf7, 0x7e, 0xef, 0xcd, 0xbe, 0x3f, 0x33, 0x84, + 0x0f, 0xa5, 0x18, 0x76, 0x87, 0x4c, 0x04, 0xa1, 0xea, 0x46, 0x3e, 0x0b, 0x02, 0x2e, 0xba, 0xae, + 0x27, 0x95, 0xf0, 0x06, 0xb1, 0xe2, 0x6e, 0x34, 0xc8, 0xef, 0x1c, 0x8d, 0xb0, 0x23, 0x11, 0xaa, + 0x90, 0xb4, 0xa3, 0x73, 0xdb, 0xd8, 0xd9, 0x89, 0x9d, 0x3d, 0x63, 0xd7, 0xac, 0x8f, 0xc2, 0x51, + 0x88, 0xd8, 0xae, 0x5e, 0x19, 0xb3, 0x66, 0x5b, 0xfb, 0x63, 0x91, 
0xd7, 0x35, 0x9a, 0x38, 0xf6, + 0xb4, 0x0f, 0xfd, 0x2f, 0x01, 0xbc, 0x35, 0x17, 0x50, 0x34, 0xe8, 0x5e, 0x79, 0x6d, 0xbe, 0x8b, + 0xda, 0x70, 0x32, 0x09, 0x83, 0xee, 0x80, 0x49, 0xde, 0x95, 0x8a, 0xa9, 0x58, 0x46, 0x83, 0x64, + 0x91, 0xc0, 0xee, 0x69, 0x98, 0x1c, 0x33, 0xc1, 0xdd, 0xee, 0xc0, 0x0f, 0xc3, 0xc9, 0xa9, 0xe7, + 0x2b, 0x2e, 0xa2, 0x41, 0x7e, 0x97, 0x60, 0xdf, 0xc9, 0x61, 0x27, 0x5c, 0x31, 0x97, 0x29, 0x16, + 0x0d, 0xb2, 0x65, 0xde, 0xb1, 0x62, 0x03, 0x9f, 0x3b, 0x52, 0x85, 0x82, 0x77, 0xe5, 0x70, 0xcc, + 0x27, 0x1a, 0x68, 0x16, 0x06, 0xd6, 0x99, 0x5a, 0xb0, 0xf6, 0xab, 0xc4, 0xf2, 0x30, 0x38, 0x0d, + 0xc9, 0x13, 0xd8, 0x48, 0x99, 0x9c, 0x53, 0x8f, 0xfb, 0xae, 0x6c, 0x58, 0x77, 0x0b, 0xdb, 0xeb, + 0x3b, 0x1d, 0x3b, 0x3a, 0xb7, 0x8d, 0x5b, 0xfb, 0xca, 0xad, 0x9d, 0x1a, 0x9f, 0x5c, 0x44, 0x9c, + 0xae, 0xa7, 0x8a, 0x47, 0x68, 0x49, 0x7e, 0x07, 0x5b, 0xe7, 0xe7, 0x63, 0x26, 0xc7, 0x3f, 0xfb, + 0xc0, 0xc1, 0x83, 0x38, 0xe6, 0x24, 0x8d, 0xe5, 0xbb, 0xd6, 0x76, 0x65, 0xe7, 0x41, 0x8e, 0x72, + 0xe6, 0xd4, 0xf6, 0x97, 0x5f, 0x1e, 0xa0, 0x55, 0x4f, 0x4b, 0x1f, 0xa1, 0xb4, 0xf7, 0x83, 0xe9, + 0x65, 0x7b, 0x73, 0x81, 0xe2, 0x60, 0x89, 0x6e, 0xa6, 0x5e, 0xf2, 0xf8, 0x12, 0xac, 0x18, 0xbe, + 0xce, 0x37, 0x45, 0x80, 0x3d, 0xcc, 0x10, 0x1e, 0xf1, 0x3d, 0xa8, 0x9f, 0xc5, 0x5c, 0x5c, 0x38, + 0x03, 0x11, 0x3e, 0xe7, 0xc2, 0x61, 0xae, 0x2b, 0xb8, 0xd4, 0xe7, 0xb4, 0xb6, 0xcb, 0x94, 0xa0, + 0xae, 0x87, 0xaa, 0x5d, 0xa3, 0x21, 0x1f, 0x42, 0x89, 0x8d, 0x78, 0xa0, 0x1c, 0xcf, 0x6d, 0x00, + 0x86, 0xbe, 0xa1, 0x43, 0x37, 0xc5, 0x60, 0x3f, 0x7d, 0x7a, 0xb8, 0xdf, 0xab, 0x4c, 0x2f, 0xdb, + 0xab, 0xbb, 0x1a, 0x74, 0xb8, 0x4f, 0x57, 0x11, 0x7d, 0xe8, 0x92, 0x9f, 0xc3, 0xc6, 0x98, 0x49, + 0x67, 0x24, 0xa2, 0xa1, 0x23, 0xb9, 0x78, 0x91, 0x1c, 0xbd, 0xd4, 0xbb, 0x3d, 0xbd, 0x6c, 0x57, + 0x0f, 0x98, 0x7c, 0x4c, 0x8f, 0xf7, 0xfa, 0xa8, 0xa0, 0xd5, 0x31, 0x93, 0x8f, 0x45, 0x34, 0x34, + 0x5b, 0xb2, 0x03, 0x6b, 0x68, 0x96, 0x46, 0x57, 0xd0, 0xd1, 0xf5, 0x36, 0xa6, 0x97, 0xed, 0x8a, + 0x36, 
0x4a, 0x42, 0xa3, 0x15, 0x0d, 0x4a, 0xe3, 0x7c, 0x07, 0xd6, 0xb5, 0x3b, 0x4c, 0x1e, 0x66, + 0xbd, 0x51, 0xd4, 0xde, 0xe8, 0xda, 0x98, 0xc9, 0x7d, 0xa6, 0x58, 0x5f, 0xcb, 0xc8, 0xbb, 0xb0, + 0x1e, 0x89, 0x70, 0xc8, 0xa5, 0xe4, 0x06, 0xdb, 0xb8, 0x85, 0xa8, 0x6a, 0x26, 0xd5, 0x58, 0xf2, + 0x01, 0xdc, 0x61, 0xc3, 0x21, 0x8f, 0x94, 0x74, 0x04, 0x9f, 0x84, 0x8a, 0x3b, 0x32, 0x8c, 0xc5, + 0x90, 0xcb, 0xc6, 0x0a, 0xc2, 0xeb, 0x89, 0x96, 0xa2, 0xb2, 0x6f, 0x74, 0xe4, 0x10, 0xc0, 0x54, + 0x9d, 0x17, 0x9c, 0x86, 0x8d, 0xd5, 0xbb, 0x85, 0xed, 0xca, 0xce, 0x3d, 0xfb, 0x9a, 0xde, 0xb3, + 0x4f, 0xb4, 0x89, 0x4e, 0x0e, 0x2d, 0xab, 0x74, 0x49, 0xde, 0x82, 0x22, 0x93, 0x9e, 0xdb, 0x28, + 0xdd, 0xb5, 0xb6, 0xab, 0xbd, 0xd2, 0xf4, 0xb2, 0x5d, 0xdc, 0xed, 0x1f, 0xee, 0x53, 0x94, 0x12, + 0x0a, 0xd5, 0xac, 0x50, 0xd1, 0x57, 0x19, 0x13, 0xf3, 0x93, 0x6b, 0x7d, 0xe5, 0xcb, 0x9d, 0xae, + 0x4d, 0xf2, 0xc5, 0xff, 0x11, 0xac, 0x4b, 0xe9, 0x3b, 0x8a, 0x89, 0x11, 0x57, 0x01, 0x9b, 0xf0, + 0x46, 0x05, 0xbf, 0x3a, 0x66, 0xab, 0xdf, 0x3f, 0x3a, 0x41, 0xc5, 0xe7, 0x6c, 0xc2, 0x69, 0x55, + 0x4a, 0xff, 0x24, 0xc3, 0x75, 0xc6, 0x50, 0xce, 0xce, 0x40, 0xea, 0x70, 0x0b, 0x4f, 0x91, 0x54, + 0x94, 0xd9, 0x90, 0xfb, 0x70, 0x1b, 0x17, 0xca, 0xfb, 0x2d, 0x53, 0x5e, 0x18, 0x38, 0xcf, 0xf9, + 0x05, 0x56, 0x43, 0x99, 0xd6, 0x66, 0x14, 0x4f, 0xf8, 0x05, 0x69, 0xc0, 0xaa, 0x91, 0xe9, 0xc4, + 0x17, 0xb6, 0xcb, 0x34, 0xdd, 0x76, 0xfe, 0x6c, 0x01, 0xf4, 0xb1, 0x85, 0xd1, 0x17, 0x81, 0x22, + 0x06, 0x6a, 0x5c, 0xe1, 0x9a, 0x7c, 0x02, 0x25, 0xc1, 0x7d, 0xe4, 0x4a, 0x3a, 0xed, 0x47, 0xfa, + 0xab, 0xe4, 0xa6, 0x81, 0x9d, 0x4e, 0x03, 0x9b, 0x26, 0x40, 0x9a, 0x99, 0x10, 0x1b, 0xc0, 0x54, + 0xbb, 0xef, 0x49, 0x85, 0xee, 0xdf, 0xac, 0x77, 0x5a, 0x46, 0xc8, 0x91, 0x27, 0x55, 0xe7, 0x1f, + 0x16, 0xd4, 0xf6, 0xaf, 0x3e, 0x71, 0x5f, 0x31, 0xc5, 0xc9, 0x11, 0x54, 0x4c, 0x16, 0x4c, 0x72, + 0x2c, 0x64, 0xb9, 0x7f, 0x6d, 0x72, 0xae, 0xda, 0x94, 0xc2, 0xf0, 0xaa, 0x65, 0x8f, 0xa0, 0x62, + 0x22, 0x36, 0x6c, 0xcb, 0x37, 0x64, 0xbb, 
0xfa, 0x4e, 0x14, 0x64, 0xb6, 0xee, 0xfc, 0xab, 0x00, + 0x1b, 0xb9, 0x80, 0x8f, 0x7d, 0x16, 0x10, 0x01, 0xe4, 0x6c, 0x90, 0x36, 0x9b, 0xa3, 0x42, 0xbc, + 0x3a, 0x92, 0xb0, 0x1f, 0x5e, 0xeb, 0x68, 0x8e, 0xcd, 0xfe, 0xf5, 0x20, 0x69, 0xc9, 0x93, 0x50, + 0xef, 0x1f, 0x06, 0x4a, 0x5c, 0xd0, 0x8d, 0xb3, 0x59, 0x29, 0x79, 0x01, 0xf5, 0x59, 0x9f, 0x2e, + 0x1b, 0xe9, 0x11, 0x63, 0x8e, 0xf7, 0xe8, 0xfb, 0x78, 0xdd, 0x67, 0xa3, 0x43, 0xd7, 0xb8, 0xad, + 0x9d, 0xcd, 0x89, 0xc9, 0x8f, 0xa1, 0xe0, 0xb2, 0x11, 0x4e, 0x94, 0xca, 0xce, 0xd6, 0x9c, 0x1b, + 0xcd, 0xbb, 0xfb, 0x98, 0x6a, 0x44, 0xf3, 0x19, 0xd4, 0x17, 0x9d, 0x84, 0xd4, 0xa0, 0xa0, 0x8b, + 0xd7, 0xd4, 0x9c, 0x5e, 0x92, 0x07, 0x70, 0xeb, 0x05, 0xf3, 0x63, 0x9e, 0xd4, 0xdb, 0x9d, 0x37, + 0x49, 0xb5, 0x35, 0x35, 0xa0, 0x8f, 0x97, 0x3f, 0xb2, 0x9a, 0x7b, 0xb0, 0xb5, 0x30, 0xde, 0x05, + 0xe4, 0xf5, 0x3c, 0x79, 0x31, 0x47, 0xd2, 0xf9, 0xa3, 0x05, 0x35, 0xca, 0x5d, 0x36, 0xd4, 0x85, + 0xfb, 0x45, 0xa4, 0xff, 0x4a, 0xf2, 0x00, 0x48, 0x2c, 0xb9, 0x73, 0x1a, 0xfb, 0xbe, 0x23, 0x52, + 0x25, 0xf2, 0x95, 0x68, 0x2d, 0x96, 0xfc, 0x51, 0xec, 0xfb, 0x99, 0x11, 0xf9, 0x25, 0xbc, 0xad, + 0xd1, 0xd1, 0x79, 0x82, 0x75, 0x22, 0xcf, 0x73, 0x06, 0x5c, 0x2a, 0x87, 0x9f, 0x9e, 0x86, 0x42, + 0x99, 0x81, 0x4d, 0x1b, 0xb1, 0xe4, 0xc7, 0xe7, 0xc6, 0xec, 0xd8, 0xf3, 0x7a, 0x5c, 0xaa, 0x87, + 0xa8, 0xef, 0xfc, 0xd7, 0x02, 0xf2, 0xc5, 0x09, 0xf7, 0x1f, 0x06, 0x6e, 0x14, 0x7a, 0x81, 0xda, + 0x0b, 0x83, 0x53, 0x6f, 0x44, 0x7e, 0x08, 0x85, 0x58, 0xf8, 0xe6, 0x18, 0xbd, 0xd5, 0xe9, 0x65, + 0xbb, 0xf0, 0x94, 0x1e, 0x51, 0x2d, 0x23, 0xcf, 0x60, 0x75, 0xcc, 0x99, 0xcb, 0x85, 0x4c, 0x52, + 0xfd, 0xe9, 0xb5, 0xa9, 0x7e, 0xd3, 0x81, 0x7d, 0x60, 0x28, 0x4c, 0x92, 0x53, 0x42, 0xd2, 0x84, + 0x92, 0x17, 0x48, 0x3e, 0x8c, 0x05, 0xc7, 0x04, 0x97, 0x68, 0xb6, 0xc7, 0xa1, 0xe2, 0x4d, 0x78, + 0x18, 0x2b, 0xbc, 0x17, 0x0a, 0x34, 0xdd, 0x36, 0x3f, 0x86, 0xb5, 0x3c, 0xdd, 0x75, 0x39, 0x28, + 0xe7, 0x73, 0xf0, 0x77, 0x0b, 0x6a, 0x7b, 0xbe, 0x37, 0x7c, 0x7e, 0x10, 0xc6, 
0x92, 0x27, 0xa7, + 0x6f, 0x42, 0x69, 0x1c, 0x4a, 0x95, 0x1b, 0x4d, 0xd9, 0x5e, 0x8f, 0x2c, 0xbd, 0x4e, 0x98, 0x70, + 0xad, 0x65, 0x91, 0xfe, 0xd8, 0x3a, 0xe4, 0x5b, 0x14, 0xd7, 0x9a, 0x23, 0x96, 0x5c, 0x20, 0x47, + 0xd1, 0x70, 0xa4, 0x7b, 0xad, 0x8b, 0x98, 0x94, 0xbf, 0x09, 0x85, 0x8b, 0xb7, 0x57, 0x99, 0x66, + 0x7b, 0xad, 0xd3, 0x13, 0x5d, 0x3f, 0xb7, 0xf0, 0xaa, 0x2a, 0xd3, 0x6c, 0xdf, 0xa1, 0xb0, 0x76, + 0xec, 0xc7, 0x23, 0x2f, 0x48, 0xe2, 0xec, 0x40, 0x55, 0x2a, 0x26, 0x94, 0xa3, 0xbf, 0x84, 0x13, + 0x98, 0x47, 0x40, 0x81, 0x56, 0x50, 0x78, 0xe2, 0x4d, 0xf8, 0xe7, 0x92, 0xb4, 0xa0, 0xc2, 0x03, + 0x37, 0x43, 0x2c, 0x23, 0xa2, 0xcc, 0x03, 0xd7, 0xe8, 0x3b, 0xff, 0xb4, 0xa0, 0xbc, 0xcf, 0x07, + 0xf1, 0x08, 0x47, 0xd5, 0x19, 0x6c, 0x85, 0x8a, 0xfb, 0x8e, 0xab, 0x25, 0x0e, 0x53, 0x49, 0x12, + 0x65, 0x32, 0x4b, 0x3e, 0xb9, 0xbe, 0xab, 0x53, 0x2a, 0x4c, 0x3a, 0xee, 0x76, 0x53, 0x16, 0xba, + 0xa9, 0xb9, 0x67, 0x65, 0xb2, 0xf9, 0x0b, 0x53, 0x80, 0xb3, 0xe2, 0x85, 0x37, 0xc3, 0xc2, 0x2c, + 0x76, 0xfe, 0xba, 0x02, 0x9b, 0x47, 0xe1, 0xc8, 0x1b, 0x32, 0xff, 0xd8, 0x84, 0x64, 0x66, 0xf8, + 0x57, 0x70, 0x3b, 0xff, 0x98, 0xd6, 0x2f, 0xd6, 0xb4, 0xc1, 0x7f, 0xfa, 0x5d, 0x86, 0x13, 0xb2, + 0xd1, 0x9a, 0x3b, 0x7f, 0x47, 0x7c, 0x0a, 0x6b, 0xda, 0xd6, 0x09, 0x4d, 0xe3, 0x26, 0x03, 0xe9, + 0xed, 0xc5, 0xb3, 0x23, 0xe9, 0x6e, 0x5a, 0x89, 0xae, 0x36, 0xfa, 0x29, 0x23, 0xb8, 0x8c, 0x7d, + 0x95, 0x3d, 0x93, 0x4c, 0xa1, 0x54, 0x8d, 0x34, 0x7d, 0x17, 0x3d, 0x81, 0xad, 0x04, 0x36, 0x77, + 0xbd, 0x63, 0xe9, 0x98, 0x97, 0x25, 0x45, 0xc0, 0xec, 0x25, 0xbf, 0x69, 0xac, 0xfa, 0xf9, 0xab, + 0x5e, 0x7f, 0x95, 0x6c, 0xaa, 0x64, 0xa1, 0xaf, 0xde, 0xf0, 0xab, 0xcc, 0x0f, 0x2b, 0x5a, 0x13, + 0xf3, 0xe3, 0xeb, 0xf7, 0x50, 0xc7, 0x02, 0xe2, 0x49, 0xbb, 0x3b, 0x43, 0x2c, 0x55, 0x7c, 0x06, + 0x55, 0x76, 0xde, 0xff, 0x3f, 0x46, 0x45, 0xef, 0xce, 0xf4, 0xb2, 0xbd, 0x60, 0x46, 0x51, 0xa2, + 0x1d, 0xcd, 0xcd, 0x2d, 0x0a, 0xd5, 0x08, 0x3b, 0x24, 0xf5, 0x7b, 0xd3, 0x77, 0x55, 0xbe, 0xaf, + 0xe8, 0x5a, 0x94, 
0xef, 0xb2, 0xaf, 0xe0, 0xf6, 0x50, 0x4f, 0x88, 0xb1, 0x9e, 0x10, 0x29, 0x6f, + 0xe5, 0x86, 0x9f, 0x6c, 0x7e, 0xb6, 0xd0, 0xda, 0x15, 0x57, 0xc2, 0x7f, 0x08, 0x60, 0xda, 0x0d, + 0x5f, 0x07, 0xe6, 0x85, 0x7e, 0xef, 0xe6, 0x8d, 0x46, 0xcb, 0x6e, 0xba, 0xfc, 0xac, 0x58, 0xb2, + 0x6a, 0xcb, 0x9f, 0x15, 0x4b, 0x2b, 0xb5, 0xd5, 0xce, 0x9f, 0x2c, 0xa8, 0xcf, 0xf6, 0x85, 0x29, + 0x12, 0x72, 0x1f, 0x56, 0xcc, 0xcf, 0x37, 0x6c, 0xae, 0xca, 0xce, 0x26, 0xfe, 0x90, 0x49, 0x7e, + 0xd9, 0xd9, 0x7d, 0x5c, 0xd0, 0x04, 0x42, 0xf6, 0xa1, 0x88, 0x6f, 0x09, 0xd3, 0x38, 0xef, 0x7d, + 0xd7, 0x5b, 0x9d, 0xa2, 0x75, 0x6f, 0xef, 0xe5, 0xab, 0xd6, 0xd2, 0xd7, 0xaf, 0x5a, 0x4b, 0xdf, + 0xbe, 0x6a, 0x59, 0x7f, 0x98, 0xb6, 0xac, 0xbf, 0x4d, 0x5b, 0xd6, 0xbf, 0xa7, 0x2d, 0xeb, 0xe5, + 0xb4, 0x65, 0x7d, 0x33, 0x6d, 0x59, 0xff, 0x99, 0xb6, 0x96, 0xbe, 0x9d, 0xb6, 0xac, 0xbf, 0xbc, + 0x6e, 0x2d, 0xbd, 0x7c, 0xdd, 0x5a, 0xfa, 0xfa, 0x75, 0x6b, 0xe9, 0x59, 0x75, 0x86, 0x7a, 0xb0, + 0x82, 0x3f, 0xfa, 0xde, 0xff, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x09, 0xb7, 0x07, 0x45, + 0x0f, 0x00, 0x00, } func (this *MetadataInfo) Equal(that interface{}) bool { @@ -1333,6 +1432,45 @@ func (this *OTelEndpointConfig) Equal(that interface{}) bool { } return true } +func (this *ClickHouseConfig) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClickHouseConfig) + if !ok { + that2, ok := that.(ClickHouseConfig) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Hostname != that1.Hostname { + return false + } + if this.Host != that1.Host { + return false + } + if this.Port != that1.Port { + return false + } + if this.Username != that1.Username { + return false + } + if this.Password != that1.Password { + return false + } + if this.Database != that1.Database { + return false + } + return true +} func (this *PluginConfig) Equal(that interface{}) bool { if that 
== nil { return this == nil @@ -1456,6 +1594,9 @@ func (this *LogicalPlannerState) Equal(that interface{}) bool { if !this.PluginConfig.Equal(that1.PluginConfig) { return false } + if !this.ClickhouseConfig.Equal(that1.ClickhouseConfig) { + return false + } if !this.DebugInfo.Equal(that1.DebugInfo) { return false } @@ -1652,6 +1793,21 @@ func (this *OTelEndpointConfig) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ClickHouseConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&distributedpb.ClickHouseConfig{") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Password: "+fmt.Sprintf("%#v", this.Password)+",\n") + s = append(s, "Database: "+fmt.Sprintf("%#v", this.Database)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *PluginConfig) GoString() string { if this == nil { return "nil" @@ -1690,7 +1846,7 @@ func (this *LogicalPlannerState) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 13) s = append(s, "&distributedpb.LogicalPlannerState{") if this.DistributedState != nil { s = append(s, "DistributedState: "+fmt.Sprintf("%#v", this.DistributedState)+",\n") @@ -1709,6 +1865,9 @@ func (this *LogicalPlannerState) GoString() string { if this.PluginConfig != nil { s = append(s, "PluginConfig: "+fmt.Sprintf("%#v", this.PluginConfig)+",\n") } + if this.ClickhouseConfig != nil { + s = append(s, "ClickhouseConfig: "+fmt.Sprintf("%#v", this.ClickhouseConfig)+",\n") + } if this.DebugInfo != nil { s = append(s, "DebugInfo: "+fmt.Sprintf("%#v", this.DebugInfo)+",\n") } @@ -2274,6 +2433,69 @@ func (m *OTelEndpointConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 
len(dAtA) - i, nil } +func (m *ClickHouseConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClickHouseConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClickHouseConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Database) > 0 { + i -= len(m.Database) + copy(dAtA[i:], m.Database) + i = encodeVarintDistributedPlan(dAtA, i, uint64(len(m.Database))) + i-- + dAtA[i] = 0x32 + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintDistributedPlan(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x2a + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintDistributedPlan(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x22 + } + if m.Port != 0 { + i = encodeVarintDistributedPlan(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintDistributedPlan(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintDistributedPlan(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *PluginConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2401,6 +2623,18 @@ func (m *LogicalPlannerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ClickhouseConfig != nil { + { + size, err := m.ClickhouseConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDistributedPlan(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.DebugInfo != nil { { size, err 
:= m.DebugInfo.MarshalToSizedBuffer(dAtA[:i]) @@ -2772,6 +3006,38 @@ func (m *OTelEndpointConfig) Size() (n int) { return n } +func (m *ClickHouseConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovDistributedPlan(uint64(l)) + } + l = len(m.Host) + if l > 0 { + n += 1 + l + sovDistributedPlan(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovDistributedPlan(uint64(m.Port)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovDistributedPlan(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovDistributedPlan(uint64(l)) + } + l = len(m.Database) + if l > 0 { + n += 1 + l + sovDistributedPlan(uint64(l)) + } + return n +} + func (m *PluginConfig) Size() (n int) { if m == nil { return 0 @@ -2857,6 +3123,10 @@ func (m *LogicalPlannerState) Size() (n int) { l = m.DebugInfo.Size() n += 1 + l + sovDistributedPlan(uint64(l)) } + if m.ClickhouseConfig != nil { + l = m.ClickhouseConfig.Size() + n += 1 + l + sovDistributedPlan(uint64(l)) + } return n } @@ -3045,6 +3315,21 @@ func (this *OTelEndpointConfig) String() string { }, "") return s } +func (this *ClickHouseConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClickHouseConfig{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `Database:` + fmt.Sprintf("%v", this.Database) + `,`, + `}`, + }, "") + return s +} func (this *PluginConfig) String() string { if this == nil { return "nil" @@ -3095,6 +3380,7 @@ func (this *LogicalPlannerState) String() string { `OTelEndpointConfig:` + strings.Replace(this.OTelEndpointConfig.String(), "OTelEndpointConfig", "OTelEndpointConfig", 1) + `,`, `PluginConfig:` + strings.Replace(this.PluginConfig.String(), "PluginConfig", "PluginConfig", 1) 
+ `,`, `DebugInfo:` + strings.Replace(this.DebugInfo.String(), "DebugInfo", "DebugInfo", 1) + `,`, + `ClickhouseConfig:` + strings.Replace(this.ClickhouseConfig.String(), "ClickHouseConfig", "ClickHouseConfig", 1) + `,`, `}`, }, "") return s @@ -4705,6 +4991,235 @@ func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *ClickHouseConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClickHouseConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClickHouseConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDistributedPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDistributedPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDistributedPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDistributedPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDistributedPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDistributedPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthDistributedPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDistributedPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDistributedPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDistributedPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Database = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDistributedPlan(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDistributedPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PluginConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5300,6 +5815,42 @@ func (m *LogicalPlannerState) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClickhouseConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDistributedPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDistributedPlan + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthDistributedPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClickhouseConfig == nil { + m.ClickhouseConfig = &ClickHouseConfig{} + } + if err := m.ClickhouseConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDistributedPlan(dAtA[iNdEx:]) diff --git a/src/carnot/planner/file_source/BUILD.bazel b/src/carnot/planner/file_source/BUILD.bazel deleted file mode 100644 index 2d00258245f..00000000000 --- a/src/carnot/planner/file_source/BUILD.bazel +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2018- The Pixie Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -load("//bazel:pl_build_system.bzl", "pl_cc_binary", "pl_cc_library", "pl_cc_test") - -package(default_visibility = [ - "//src/carnot:__subpackages__", - "//src/experimental/standalone_pem:__subpackages__", # TODO(ddelnano): Is this needed? 
-]) - -pl_cc_library( - name = "cc_library", - srcs = glob( - [ - "*.cc", - "*.h", - ], - exclude = [ - "**/*_test.cc", - "**/*_test_utils.h", - ], - ), - hdrs = ["file_source.h"], - deps = [ - "//src/carnot/planner/objects:cc_library", - "//src/carnot/planner/probes:cc_library", - "//src/common/uuid:cc_library", # TODO(ddelnano): This may not be needed - ], -) - -pl_cc_test( - name = "file_source_test", - srcs = ["file_source_test.cc"], - deps = [ - ":cc_library", - "//src/carnot/planner:test_utils", - "//src/carnot/planner/compiler:cc_library", - ], -) diff --git a/src/carnot/planner/file_source/file_source.cc b/src/carnot/planner/file_source/file_source.cc deleted file mode 100644 index 4e7c0e88a96..00000000000 --- a/src/carnot/planner/file_source/file_source.cc +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "src/carnot/planner/file_source/file_source.h" - -namespace px { -namespace carnot { -namespace planner { -namespace compiler {} // namespace compiler -} // namespace planner -} // namespace carnot -} // namespace px diff --git a/src/carnot/planner/file_source/file_source.h b/src/carnot/planner/file_source/file_source.h deleted file mode 100644 index e15c1f734ac..00000000000 --- a/src/carnot/planner/file_source/file_source.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include "src/carnot/planner/objects/funcobject.h" - -namespace px { -namespace carnot { -namespace planner { -namespace compiler { - -class FileSourceIR { - /* public: */ - - /* private: */ -}; - -} // namespace compiler -} // namespace planner -} // namespace carnot -} // namespace px diff --git a/src/carnot/planner/file_source/file_source_test.cc b/src/carnot/planner/file_source/file_source_test.cc deleted file mode 100644 index 1105a3b26d6..00000000000 --- a/src/carnot/planner/file_source/file_source_test.cc +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "src/carnot/planner/compiler/ast_visitor.h" -#include "src/carnot/planner/compiler/test_utils.h" -#include "src/carnot/planner/probes/probes.h" - -namespace px { -namespace carnot { -namespace planner { -namespace compiler { -using ::testing::ContainsRegex; -using ::testing::Not; -using ::testing::UnorderedElementsAre; - -constexpr char kSingleFileSource[] = R"pxl( -import pxlog - -glob_pattern = 'test.json' -pxlog.FileSource(glob_pattern, 'test_table', '5m') -)pxl"; - -constexpr char kSingleFileSourceProgramPb[] = R"pxl( -glob_pattern: "test.json" -table_name: "test_table" -ttl { - seconds: 300 -} -)pxl"; - -class FileSourceCompilerTest : public ASTVisitorTest { - protected: - StatusOr> CompileFileSourceScript( - std::string_view query, const ExecFuncs& exec_funcs = {}) { - absl::flat_hash_set reserved_names; - for (const auto& func : exec_funcs) { - reserved_names.insert(func.output_table_prefix()); - } - auto func_based_exec = exec_funcs.size() > 0; - - Parser parser; - PX_ASSIGN_OR_RETURN(auto ast, parser.Parse(query)); - - std::shared_ptr ir = std::make_shared(); - std::shared_ptr mutation_ir = std::make_shared(); - - ModuleHandler module_handler; - PX_ASSIGN_OR_RETURN(auto ast_walker, compiler::ASTVisitorImpl::Create( - ir.get(), mutation_ir.get(), compiler_state_.get(), - &module_handler, func_based_exec, reserved_names, {})); - - PX_RETURN_IF_ERROR(ast_walker->ProcessModuleNode(ast)); - if (func_based_exec) { - PX_RETURN_IF_ERROR(ast_walker->ProcessExecFuncs(exec_funcs)); - } - return mutation_ir; - } -}; - -// TODO(ddelnano): Add test that verifies missing arguments provides a compiler error -// instead of the "Query should not be empty" error. There seems to be a bug where default -// arguments are not being handled correctly. 
- -TEST_F(FileSourceCompilerTest, parse_single_file_source) { - ASSERT_OK_AND_ASSIGN(auto mutation_ir, CompileFileSourceScript(kSingleFileSource)); - plannerpb::CompileMutationsResponse pb; - EXPECT_OK(mutation_ir->ToProto(&pb)); - ASSERT_EQ(pb.mutations_size(), 1); - EXPECT_THAT(pb.mutations()[0].file_source(), - testing::proto::EqualsProto(kSingleFileSourceProgramPb)); -} - -} // namespace compiler -} // namespace planner -} // namespace carnot -} // namespace px diff --git a/src/carnot/planner/file_source/ir/BUILD.bazel b/src/carnot/planner/file_source/ir/BUILD.bazel deleted file mode 100644 index 759282f6c38..00000000000 --- a/src/carnot/planner/file_source/ir/BUILD.bazel +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2018- The Pixie Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 - -load("//bazel:proto_compile.bzl", "pl_cc_proto_library", "pl_go_proto_library", "pl_proto_library") - -package(default_visibility = ["//src:__subpackages__"]) - -pl_proto_library( - name = "logical_pl_proto", - srcs = ["logical.proto"], - deps = [ - "@gogo_grpc_proto//gogoproto:gogo_pl_proto", - ], -) - -pl_cc_proto_library( - name = "logical_pl_cc_proto", - proto = ":logical_pl_proto", - deps = [ - "@gogo_grpc_proto//gogoproto:gogo_pl_cc_proto", - ], -) - -pl_go_proto_library( - name = "logical_pl_go_proto", - importpath = "px.dev/pixie/src/carnot/planner/file_source/ir", - proto = ":logical_pl_proto", -) diff --git a/src/carnot/planner/file_source/ir/logical.pb.go b/src/carnot/planner/file_source/ir/logical.pb.go deleted file mode 100755 index f424f8ec525..00000000000 --- a/src/carnot/planner/file_source/ir/logical.pb.go +++ /dev/null @@ -1,567 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: src/carnot/planner/file_source/ir/logical.proto - -package ir - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type FileSourceDeployment struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - GlobPattern string `protobuf:"bytes,2,opt,name=glob_pattern,json=globPattern,proto3" json:"glob_pattern,omitempty"` - TableName string `protobuf:"bytes,3,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` - TTL *types.Duration `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` -} - -func (m *FileSourceDeployment) Reset() { *m = FileSourceDeployment{} } -func (*FileSourceDeployment) ProtoMessage() {} -func (*FileSourceDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_452b4826b1190f86, []int{0} -} -func (m *FileSourceDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileSourceDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FileSourceDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FileSourceDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileSourceDeployment.Merge(m, src) -} -func (m *FileSourceDeployment) XXX_Size() int { - return m.Size() -} -func (m *FileSourceDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_FileSourceDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_FileSourceDeployment proto.InternalMessageInfo - -func (m *FileSourceDeployment) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *FileSourceDeployment) GetGlobPattern() string { - if m != nil { - return m.GlobPattern - } - return "" -} - -func (m *FileSourceDeployment) GetTableName() string { - if m != nil { - return m.TableName - } - return "" -} - -func (m *FileSourceDeployment) GetTTL() *types.Duration { - if m != nil { - return m.TTL - } - return nil -} - -func init() { 
- proto.RegisterType((*FileSourceDeployment)(nil), "px.carnot.planner.file_source.ir.FileSourceDeployment") -} - -func init() { - proto.RegisterFile("src/carnot/planner/file_source/ir/logical.proto", fileDescriptor_452b4826b1190f86) -} - -var fileDescriptor_452b4826b1190f86 = []byte{ - // 302 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0x42, 0x31, - 0x18, 0x85, 0x6f, 0x81, 0x68, 0x28, 0x4e, 0x37, 0x0c, 0x48, 0xe2, 0x2f, 0x3a, 0x31, 0xb5, 0x89, - 0x3a, 0x38, 0x13, 0xe2, 0x64, 0x8c, 0x41, 0x26, 0x17, 0xd2, 0x7b, 0x2d, 0x4d, 0x93, 0xd2, 0xff, - 0xa6, 0x94, 0x44, 0x37, 0x1f, 0xc1, 0x67, 0x70, 0xf2, 0x51, 0x1c, 0x19, 0x99, 0x8c, 0xf4, 0x2e, - 0x8e, 0x3c, 0x82, 0xb9, 0xbd, 0x98, 0xb8, 0xfd, 0xff, 0x39, 0xdf, 0x39, 0x39, 0x94, 0x2f, 0x5d, - 0xce, 0x73, 0xe1, 0x2c, 0x7a, 0x5e, 0x18, 0x61, 0xad, 0x74, 0x7c, 0xae, 0x8d, 0x9c, 0x2d, 0x71, - 0xe5, 0x72, 0xc9, 0xb5, 0xe3, 0x06, 0x95, 0xce, 0x85, 0x61, 0x85, 0x43, 0x8f, 0xe9, 0xa0, 0x78, - 0x66, 0x35, 0xcf, 0xf6, 0x3c, 0xfb, 0xc7, 0x33, 0xed, 0xfa, 0x5d, 0x85, 0x0a, 0x23, 0xcc, 0xab, - 0xab, 0xce, 0xf5, 0x41, 0x21, 0x2a, 0x23, 0x79, 0xfc, 0xb2, 0xd5, 0x9c, 0x3f, 0xad, 0x9c, 0xf0, - 0x1a, 0x6d, 0xed, 0x9f, 0xbf, 0x13, 0xda, 0xbd, 0xd1, 0x46, 0x3e, 0xc4, 0x9e, 0xb1, 0x2c, 0x0c, - 0xbe, 0x2c, 0xa4, 0xf5, 0x69, 0x4a, 0x5b, 0x56, 0x2c, 0x64, 0x8f, 0x0c, 0xc8, 0xb0, 0x3d, 0x89, - 0x77, 0x7a, 0x46, 0x8f, 0x94, 0xc1, 0x6c, 0x56, 0x08, 0xef, 0xa5, 0xb3, 0xbd, 0x46, 0xf4, 0x3a, - 0x95, 0x76, 0x5f, 0x4b, 0xe9, 0x09, 0xa5, 0x5e, 0x64, 0x46, 0xce, 0x62, 0xb8, 0x19, 0x81, 0x76, - 0x54, 0xee, 0xaa, 0x86, 0x2b, 0xda, 0xf4, 0xde, 0xf4, 0x5a, 0x03, 0x32, 0xec, 0x5c, 0x1c, 0xb3, - 0x7a, 0x1c, 0xfb, 0x1b, 0xc7, 0xc6, 0xfb, 0x71, 0xa3, 0xc3, 0xf0, 0x75, 0xda, 0x9c, 0x4e, 0x6f, - 0x27, 0x15, 0x3e, 0xba, 0x5e, 0x6f, 0x21, 0xd9, 0x6c, 0x21, 0xd9, 0x6d, 0x81, 0xbc, 0x06, 0x20, - 0x1f, 0x01, 0xc8, 0x67, 0x00, 0xb2, 0x0e, 0x40, 0xbe, 0x03, 0x90, 0x9f, 0x00, 0xc9, 0x2e, 0x00, 
- 0x79, 0x2b, 0x21, 0x59, 0x97, 0x90, 0x6c, 0x4a, 0x48, 0x1e, 0x1b, 0xda, 0x65, 0x07, 0xb1, 0xfa, - 0xf2, 0x37, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x07, 0x40, 0x1c, 0x70, 0x01, 0x00, 0x00, -} - -func (this *FileSourceDeployment) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FileSourceDeployment) - if !ok { - that2, ok := that.(FileSourceDeployment) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.GlobPattern != that1.GlobPattern { - return false - } - if this.TableName != that1.TableName { - return false - } - if !this.TTL.Equal(that1.TTL) { - return false - } - return true -} -func (this *FileSourceDeployment) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&ir.FileSourceDeployment{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "GlobPattern: "+fmt.Sprintf("%#v", this.GlobPattern)+",\n") - s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") - if this.TTL != nil { - s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringLogical(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *FileSourceDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileSourceDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileSourceDeployment) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TTL != nil { - { - size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLogical(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarintLogical(dAtA, i, uint64(len(m.TableName))) - i-- - dAtA[i] = 0x1a - } - if len(m.GlobPattern) > 0 { - i -= len(m.GlobPattern) - copy(dAtA[i:], m.GlobPattern) - i = encodeVarintLogical(dAtA, i, uint64(len(m.GlobPattern))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintLogical(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintLogical(dAtA []byte, offset int, v uint64) int { - offset -= sovLogical(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *FileSourceDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovLogical(uint64(l)) - } - l = len(m.GlobPattern) - if l > 0 { - n += 1 + l + sovLogical(uint64(l)) - } - l = len(m.TableName) - if l > 0 { - n += 1 + l + sovLogical(uint64(l)) - } - if m.TTL != nil { - l = m.TTL.Size() - n += 1 + l + sovLogical(uint64(l)) - } - return n -} - -func sovLogical(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLogical(x uint64) (n int) { - return sovLogical(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *FileSourceDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceDeployment{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `GlobPattern:` + fmt.Sprintf("%v", this.GlobPattern) + `,`, - `TableName:` + fmt.Sprintf("%v", this.TableName) + `,`, - `TTL:` + strings.Replace(fmt.Sprintf("%v", 
this.TTL), "Duration", "types.Duration", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringLogical(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *FileSourceDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogical - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileSourceDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileSourceDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogical - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogical - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogical - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobPattern", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogical - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogical - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogical - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GlobPattern = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogical - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLogical - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLogical - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TableName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLogical - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLogical - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLogical - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TTL == nil { - m.TTL = &types.Duration{} - } - if err := m.TTL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLogical(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLogical - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - 
} - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLogical(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogical - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogical - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLogical - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLogical - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLogical - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLogical - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLogical = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLogical = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLogical = fmt.Errorf("proto: unexpected end of group") -) diff --git a/src/carnot/planner/file_source/ir/logical.proto b/src/carnot/planner/file_source/ir/logical.proto deleted file mode 100644 index 7b64203c214..00000000000 --- a/src/carnot/planner/file_source/ir/logical.proto +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -syntax = "proto3"; - -package px.carnot.planner.file_source.ir; - -option go_package = "ir"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/duration.proto"; - -// A logical file source deployment -message FileSourceDeployment { - // For now this is the same as glob_pattern, but in the future may provide a logical name for the - // file source. - string name = 1; - // The glob pattern to use to find files to read. - string glob_pattern = 2; - // The table name to write the data to. - string table_name = 3; - // The ttl to run the file source for. -1 indicates that the file source should run indefinitely. - google.protobuf.Duration ttl = 4 [ (gogoproto.customname) = "TTL" ]; -} diff --git a/src/carnot/planner/file_source/log_module.cc b/src/carnot/planner/file_source/log_module.cc deleted file mode 100644 index 6df5e582311..00000000000 --- a/src/carnot/planner/file_source/log_module.cc +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "src/carnot/planner/file_source/log_module.h" - -namespace px { -namespace carnot { -namespace planner { -namespace compiler { - -class FileSourceHandler { - public: - static StatusOr Eval(MutationsIR* mutations_ir, const pypa::AstPtr& ast, - const ParsedArgs& args, ASTVisitor* visitor); -}; - -class DeleteFileSourceHandler { - public: - static StatusOr Eval(MutationsIR* mutations_ir, const pypa::AstPtr& ast, - const ParsedArgs& args, ASTVisitor* visitor); -}; - -StatusOr> LogModule::Create(MutationsIR* mutations_ir, - ASTVisitor* ast_visitor) { - auto tracing_module = std::shared_ptr(new LogModule(mutations_ir, ast_visitor)); - PX_RETURN_IF_ERROR(tracing_module->Init()); - return tracing_module; -} - -Status LogModule::Init() { - PX_ASSIGN_OR_RETURN( - std::shared_ptr upsert_fn, - FuncObject::Create(kFileSourceID, {"glob_pattern", "table_name", "ttl"}, {}, - /* has_variable_len_args */ false, - /* has_variable_len_kwargs */ false, - std::bind(FileSourceHandler::Eval, mutations_ir_, std::placeholders::_1, - std::placeholders::_2, std::placeholders::_3), - ast_visitor())); - PX_RETURN_IF_ERROR(upsert_fn->SetDocString(kFileSourceDocstring)); - AddMethod(kFileSourceID, upsert_fn); - - PX_ASSIGN_OR_RETURN(std::shared_ptr delete_fn, - FuncObject::Create(kFileSourceID, {"name"}, {}, - /* has_variable_len_args */ false, - /* has_variable_len_kwargs */ false, - std::bind(DeleteFileSourceHandler::Eval, mutations_ir_, - std::placeholders::_1, std::placeholders::_2, - std::placeholders::_3), - 
ast_visitor())); - PX_RETURN_IF_ERROR(upsert_fn->SetDocString(kDeleteFileSourceDocstring)); - AddMethod(kDeleteFileSourceID, delete_fn); - - return Status::OK(); -} - -StatusOr FileSourceHandler::Eval(MutationsIR* mutations_ir, const pypa::AstPtr& ast, - const ParsedArgs& args, ASTVisitor* visitor) { - DCHECK(mutations_ir); - - PX_ASSIGN_OR_RETURN(auto glob_pattern_ir, GetArgAs(ast, args, "glob_pattern")); - PX_ASSIGN_OR_RETURN(auto table_name_ir, GetArgAs(ast, args, "table_name")); - PX_ASSIGN_OR_RETURN(auto ttl_ir, GetArgAs(ast, args, "ttl")); - - const std::string& glob_pattern_str = glob_pattern_ir->str(); - const std::string& table_name_str = table_name_ir->str(); - PX_ASSIGN_OR_RETURN(int64_t ttl_ns, StringToTimeInt(ttl_ir->str())); - - mutations_ir->CreateFileSourceDeployment(glob_pattern_str, table_name_str, ttl_ns); - - return std::static_pointer_cast(std::make_shared(ast, visitor)); -} - -StatusOr DeleteFileSourceHandler::Eval(MutationsIR* mutations_ir, - const pypa::AstPtr& ast, const ParsedArgs& args, - ASTVisitor* visitor) { - DCHECK(mutations_ir); - - PX_ASSIGN_OR_RETURN(auto glob_pattern_ir, GetArgAs(ast, args, "name")); - const std::string& glob_pattern_str = glob_pattern_ir->str(); - - mutations_ir->DeleteFileSource(glob_pattern_str); - - return std::static_pointer_cast(std::make_shared(ast, visitor)); -} - -} // namespace compiler -} // namespace planner -} // namespace carnot -} // namespace px diff --git a/src/carnot/planner/file_source/log_module.h b/src/carnot/planner/file_source/log_module.h deleted file mode 100644 index 5d5520dafa5..00000000000 --- a/src/carnot/planner/file_source/log_module.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once -#include -#include -#include -#include - -#include "src/carnot/planner/compiler_state/compiler_state.h" -#include "src/carnot/planner/objects/funcobject.h" -#include "src/carnot/planner/objects/none_object.h" -#include "src/carnot/planner/probes/probes.h" - -namespace px { -namespace carnot { -namespace planner { -namespace compiler { - -class LogModule : public QLObject { - public: - static constexpr TypeDescriptor LogModuleType = { - /* name */ "pxlog", - /* type */ QLObjectType::kLogModule, - }; - static StatusOr> Create(MutationsIR* mutations_ir, - ASTVisitor* ast_visitor); - - // Constant for the modules. 
- inline static constexpr char kLogModuleObjName[] = "pxlog"; - - inline static constexpr char kFileSourceID[] = "FileSource"; - inline static constexpr char kFileSourceDocstring[] = R"doc( - TBD - )doc"; - - inline static constexpr char kDeleteFileSourceID[] = "DeleteFileSource"; - inline static constexpr char kDeleteFileSourceDocstring[] = R"doc( - TBD - )doc"; - - protected: - explicit LogModule(MutationsIR* mutations_ir, ASTVisitor* ast_visitor) - : QLObject(LogModuleType, ast_visitor), mutations_ir_(mutations_ir) {} - Status Init(); - - private: - MutationsIR* mutations_ir_; -}; - -} // namespace compiler -} // namespace planner -} // namespace carnot -} // namespace px diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir.cc b/src/carnot/planner/ir/clickhouse_export_sink_ir.cc index 3137cbc2c7a..b4492ff8ede 100644 --- a/src/carnot/planner/ir/clickhouse_export_sink_ir.cc +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir.cc @@ -67,7 +67,6 @@ StatusOr ClickHouseExportSinkIR::ParseClickHouseDSN(co } Status ClickHouseExportSinkIR::ToProto(planpb::Operator* op) const { - PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); op->set_op_type(planpb::CLICKHOUSE_EXPORT_SINK_OPERATOR); auto clickhouse_op = op->mutable_clickhouse_sink_op(); diff --git a/src/carnot/planner/ir/clickhouse_export_sink_ir.h b/src/carnot/planner/ir/clickhouse_export_sink_ir.h index c6e65e16538..f4bc98246d6 100644 --- a/src/carnot/planner/ir/clickhouse_export_sink_ir.h +++ b/src/carnot/planner/ir/clickhouse_export_sink_ir.h @@ -37,10 +37,9 @@ namespace planner { * @brief The IR representation for the ClickHouseExportSink operator. * Represents a configuration to export a DataFrame to a ClickHouse database. 
*/ -class ClickHouseExportSinkIR : public SinkOperatorIR { +class ClickHouseExportSinkIR : public OperatorIR { public: - explicit ClickHouseExportSinkIR(int64_t id, std::string mutation_id) - : SinkOperatorIR(id, IRNodeType::kClickHouseExportSink, mutation_id) {} + explicit ClickHouseExportSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kClickHouseExportSink) {} Status Init(OperatorIR* parent, const std::string& table_name, const std::string& clickhouse_dsn); diff --git a/src/carnot/planner/ir/grpc_sink_ir.cc b/src/carnot/planner/ir/grpc_sink_ir.cc index 786da032781..b087d3eaefc 100644 --- a/src/carnot/planner/ir/grpc_sink_ir.cc +++ b/src/carnot/planner/ir/grpc_sink_ir.cc @@ -24,7 +24,6 @@ namespace planner { Status GRPCSinkIR::CopyFromNodeImpl(const IRNode* node, absl::flat_hash_map*) { - PX_RETURN_IF_ERROR(SinkOperatorIR::CopyFromNodeImpl(node, nullptr)); const GRPCSinkIR* grpc_sink = static_cast(node); sink_type_ = grpc_sink->sink_type_; destination_id_ = grpc_sink->destination_id_; @@ -36,7 +35,6 @@ Status GRPCSinkIR::CopyFromNodeImpl(const IRNode* node, } Status GRPCSinkIR::ToProto(planpb::Operator* op) const { - PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); CHECK(has_output_table()); auto pb = op->mutable_grpc_sink_op(); op->set_op_type(planpb::GRPC_SINK_OPERATOR); @@ -56,7 +54,6 @@ Status GRPCSinkIR::ToProto(planpb::Operator* op) const { } Status GRPCSinkIR::ToProto(planpb::Operator* op, int64_t agent_id) const { - PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); auto pb = op->mutable_grpc_sink_op(); op->set_op_type(planpb::GRPC_SINK_OPERATOR); pb->set_address(destination_address()); diff --git a/src/carnot/planner/ir/grpc_sink_ir.h b/src/carnot/planner/ir/grpc_sink_ir.h index 9dea6307de3..b8ef691a6f6 100644 --- a/src/carnot/planner/ir/grpc_sink_ir.h +++ b/src/carnot/planner/ir/grpc_sink_ir.h @@ -43,10 +43,9 @@ namespace planner { * 1. SetDistributedID(string): Set the name of the node same as the query broker. * 2. 
SetDestinationAddress(string): the GRPC address where batches should be sent. */ -class GRPCSinkIR : public SinkOperatorIR { +class GRPCSinkIR : public OperatorIR { public: - explicit GRPCSinkIR(int64_t id, std::string mutation_id) - : SinkOperatorIR(id, IRNodeType::kGRPCSink, mutation_id) {} + explicit GRPCSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kGRPCSink) {} enum GRPCSinkType { kTypeNotSet = 0, @@ -111,17 +110,6 @@ class GRPCSinkIR : public SinkOperatorIR { destination_ssl_targetname_ = ssl_targetname; } - std::string DebugString() const override { - auto sink_op_str = SinkOperatorIR::DebugString(); - std::vector agent_ids; - for (const auto& [agent_id, _] : agent_id_to_destination_id_) { - agent_ids.push_back(agent_id); - } - return absl::Substitute("$0(id=$1, destination_id=$2, destination_address=$3, sink_type=$4, agent_ids=$5 sink_op=$6)", - type_string(), id(), destination_id_, destination_address_, - sink_type_, absl::StrJoin(agent_ids, ","), sink_op_str); - } - const std::string& destination_address() const { return destination_address_; } bool DestinationAddressSet() const { return destination_address_ != ""; } const std::string& destination_ssl_targetname() const { return destination_ssl_targetname_; } diff --git a/src/carnot/planner/ir/ir.h b/src/carnot/planner/ir/ir.h index df5c88aecae..faeb0623eea 100644 --- a/src/carnot/planner/ir/ir.h +++ b/src/carnot/planner/ir/ir.h @@ -49,7 +49,6 @@ namespace planner { class ExpressionIR; class OperatorIR; -class SinkOperatorIR; /** * IR contains the intermediate representation of the query @@ -78,13 +77,7 @@ class IR { template StatusOr MakeNode(int64_t id, const pypa::AstPtr& ast) { id_node_counter = std::max(id + 1, id_node_counter); - std::unique_ptr node; - if constexpr (std::is_base_of_v) { - auto mutation_id = mutation_id_.value_or(""); - node = std::make_unique(id, mutation_id); - } else { - node = std::make_unique(id); - } + auto node = std::make_unique(id); dag_.AddNode(node->id()); 
node->set_graph(this); if (ast != nullptr) { @@ -130,9 +123,6 @@ class IR { } // Use the source's ID if we are copying in to a different graph. auto new_node_id = this == source->graph() ? id_node_counter : source->id(); - if (this != source->graph()) { - mutation_id_ = source->graph()->mutation_id(); - } DCHECK(!HasNode(new_node_id)) << source->DebugString(); PX_ASSIGN_OR_RETURN(IRNode * new_node, MakeNodeWithType(source->type(), new_node_id)); PX_RETURN_IF_ERROR(new_node->CopyFromNode(source, copied_nodes_map)); @@ -268,13 +258,6 @@ class IR { return nodes; } - void RecordMutationId(std::optional mutation_id) { - DCHECK(!mutation_id_.has_value()) << "Mutation ID should only be set once."; - mutation_id_ = mutation_id; - } - - std::optional mutation_id() const { return mutation_id_; } - friend std::ostream& operator<<(std::ostream& os, const std::shared_ptr&) { return os << "ir"; } @@ -287,7 +270,6 @@ class IR { plan::DAG dag_; std::unordered_map id_node_map_; int64_t id_node_counter = 0; - std::optional mutation_id_ = std::nullopt; }; Status ResolveOperatorType(OperatorIR* op, CompilerState* compiler_state); diff --git a/src/carnot/planner/ir/memory_sink_ir.cc b/src/carnot/planner/ir/memory_sink_ir.cc index 7e8fffee763..943e165f47a 100644 --- a/src/carnot/planner/ir/memory_sink_ir.cc +++ b/src/carnot/planner/ir/memory_sink_ir.cc @@ -31,7 +31,6 @@ Status MemorySinkIR::Init(OperatorIR* parent, const std::string& name, } Status MemorySinkIR::ToProto(planpb::Operator* op) const { - PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); auto pb = op->mutable_mem_sink_op(); pb->set_name(name_); op->set_op_type(planpb::MEMORY_SINK_OPERATOR); @@ -48,7 +47,6 @@ Status MemorySinkIR::ToProto(planpb::Operator* op) const { Status MemorySinkIR::CopyFromNodeImpl(const IRNode* node, absl::flat_hash_map*) { - PX_RETURN_IF_ERROR(SinkOperatorIR::CopyFromNodeImpl(node, nullptr)); const MemorySinkIR* sink_ir = static_cast(node); name_ = sink_ir->name_; out_columns_ = sink_ir->out_columns_; 
diff --git a/src/carnot/planner/ir/memory_sink_ir.h b/src/carnot/planner/ir/memory_sink_ir.h index eb50373a41f..c43b36698f3 100644 --- a/src/carnot/planner/ir/memory_sink_ir.h +++ b/src/carnot/planner/ir/memory_sink_ir.h @@ -38,11 +38,10 @@ namespace planner { /** * The MemorySinkIR describes the MemorySink operator. */ -class MemorySinkIR : public SinkOperatorIR { +class MemorySinkIR : public OperatorIR { public: MemorySinkIR() = delete; - explicit MemorySinkIR(int64_t id, std::string mutation_id) - : SinkOperatorIR(id, IRNodeType::kMemorySink, mutation_id) {} + explicit MemorySinkIR(int64_t id) : OperatorIR(id, IRNodeType::kMemorySink) {} std::string name() const { return name_; } void set_name(const std::string& name) { name_ = name; } diff --git a/src/carnot/planner/ir/memory_source_ir.cc b/src/carnot/planner/ir/memory_source_ir.cc index 18e92dc2107..fc367ce7fc0 100644 --- a/src/carnot/planner/ir/memory_source_ir.cc +++ b/src/carnot/planner/ir/memory_source_ir.cc @@ -29,7 +29,6 @@ std::string MemorySourceIR::DebugString() const { } Status MemorySourceIR::ToProto(planpb::Operator* op) const { - PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); auto pb = op->mutable_mem_source_op(); op->set_op_type(planpb::MEMORY_SOURCE_OPERATOR); pb->set_name(table_name_); diff --git a/src/carnot/planner/ir/memory_source_ir.h b/src/carnot/planner/ir/memory_source_ir.h index 2339ab4b27f..757632d2096 100644 --- a/src/carnot/planner/ir/memory_source_ir.h +++ b/src/carnot/planner/ir/memory_source_ir.h @@ -40,10 +40,10 @@ namespace planner { * @brief The MemorySourceIR is a dual logical plan * and IR node operator. 
It inherits from both classes */ -class MemorySourceIR : public SinkOperatorIR { +class MemorySourceIR : public OperatorIR { public: MemorySourceIR() = delete; - explicit MemorySourceIR(int64_t id, std::string mutation_id) : SinkOperatorIR(id, IRNodeType::kMemorySource, mutation_id) {} + explicit MemorySourceIR(int64_t id) : OperatorIR(id, IRNodeType::kMemorySource) {} /** * @brief Initialize the memory source. diff --git a/src/carnot/planner/ir/operator_ir.h b/src/carnot/planner/ir/operator_ir.h index c899679f8bb..a719432efec 100644 --- a/src/carnot/planner/ir/operator_ir.h +++ b/src/carnot/planner/ir/operator_ir.h @@ -181,40 +181,6 @@ class OperatorIR : public IRNode { std::vector parent_types_; bool parent_types_set_ = false; }; - -class SinkOperatorIR : public OperatorIR { - public: - std::string DebugString() const { - return absl::Substitute("$0(id=$1, mutation_id=$2)", type_string(), id(), mutation_id_); - } - - protected: - explicit SinkOperatorIR(int64_t id, IRNodeType type, std::string mutation_id) - : OperatorIR(id, type), mutation_id_(mutation_id) {} - - virtual Status ToProto(planpb::Operator* op) const { - if (mutation_id_.empty()) { - return Status::OK(); - } - auto context = op->mutable_context(); - context->insert({"mutation_id", mutation_id_}); - return Status::OK(); - } - - /** - * @brief Override of CopyFromNode that adds special handling for Operators. 
- */ - virtual Status CopyFromNodeImpl(const IRNode* node, - absl::flat_hash_map*) { - const SinkOperatorIR* source = static_cast(node); - mutation_id_ = source->mutation_id_; - return Status::OK(); - } - - private: - std::string mutation_id_; -}; - } // namespace planner } // namespace carnot } // namespace px diff --git a/src/carnot/planner/ir/otel_export_sink_ir.cc b/src/carnot/planner/ir/otel_export_sink_ir.cc index defa26e3ee3..672ca2c5767 100644 --- a/src/carnot/planner/ir/otel_export_sink_ir.cc +++ b/src/carnot/planner/ir/otel_export_sink_ir.cc @@ -18,8 +18,6 @@ #include -#include - #include "src/carnot/planner/ir/ir.h" #include "src/carnot/planner/ir/otel_export_sink_ir.h" #include "src/carnot/planpb/plan.pb.h" @@ -162,34 +160,10 @@ Status OTelExportSinkIR::ProcessConfig(const OTelData& data) { new_span.span_kind = span.span_kind; data_.spans.push_back(std::move(new_span)); } - for (const auto& log : data.logs) { - OTelLog new_log; - - PX_ASSIGN_OR_RETURN(new_log.time_column, AddColumn(log.time_column)); - PX_ASSIGN_OR_RETURN(new_log.body_column, AddColumn(log.body_column)); - if (log.observed_time_column != nullptr) { - PX_ASSIGN_OR_RETURN(new_log.observed_time_column, AddColumn(log.observed_time_column)); - } - - new_log.severity_text = log.severity_text; - new_log.severity_number = log.severity_number; - - for (const auto& attr : log.attributes) { - if (attr.column_reference == nullptr) { - new_log.attributes.push_back({attr.name, nullptr, attr.string_value}); - continue; - } - PX_ASSIGN_OR_RETURN(auto column, AddColumn(attr.column_reference)); - new_log.attributes.push_back({attr.name, column, ""}); - } - - data_.logs.push_back(std::move(new_log)); - } return Status::OK(); } Status OTelExportSinkIR::ToProto(planpb::Operator* op) const { - PX_RETURN_IF_ERROR(SinkOperatorIR::ToProto(op)); op->set_op_type(planpb::OTEL_EXPORT_SINK_OPERATOR); auto otel_op = op->mutable_otel_sink_op(); *otel_op->mutable_endpoint_config() = data_.endpoint_config; @@ -356,56 
+330,11 @@ Status OTelExportSinkIR::ToProto(planpb::Operator* op) const { } span_pb->set_kind_value(span.span_kind); } - for (const auto& log : data_.logs) { - auto log_pb = otel_op->add_logs(); - - if (log.time_column->EvaluatedDataType() != types::TIME64NS) { - return log.time_column->CreateIRNodeError( - "Expected time column '$0' to be TIME64NS, received $1", log.time_column->col_name(), - types::ToString(log.time_column->EvaluatedDataType())); - } - PX_ASSIGN_OR_RETURN(auto time_column_index, - log.time_column->GetColumnIndex()); - log_pb->set_time_column_index(time_column_index); - - if (log.observed_time_column != nullptr) { - if (log.observed_time_column->EvaluatedDataType() != types::TIME64NS) { - return log.observed_time_column->CreateIRNodeError( - "Expected observed_time column '$0' to be TIME64NS, received $1", log.observed_time_column->col_name(), - types::ToString(log.observed_time_column->EvaluatedDataType())); - } - PX_ASSIGN_OR_RETURN(auto observed_time_column_index, - log.observed_time_column->GetColumnIndex()); - log_pb->set_observed_time_column_index(observed_time_column_index); - } else { - log_pb->set_observed_time_column_index(-1); - } - - log_pb->set_severity_text(log.severity_text); - - // TODO(ddelnano): Add validation for severity_number if the planner isn't the right - // place to implement the validation. 
- log_pb->set_severity_number(log.severity_number); - - if (log.body_column->EvaluatedDataType() != types::STRING) { - return log.body_column->CreateIRNodeError( - "Expected body column '$0' to be STRING, received $1", log.body_column->col_name(), - types::ToString(log.body_column->EvaluatedDataType())); - } - PX_ASSIGN_OR_RETURN(auto body_column_index, - log.body_column->GetColumnIndex()); - log_pb->set_body_column_index(body_column_index); - - for (const auto& attribute : log.attributes) { - PX_RETURN_IF_ERROR(attribute.ToProto(log_pb->add_attributes())); - } - } return Status::OK(); } Status OTelExportSinkIR::CopyFromNodeImpl(const IRNode* node, absl::flat_hash_map*) { - PX_RETURN_IF_ERROR(SinkOperatorIR::CopyFromNodeImpl(node, nullptr)); const OTelExportSinkIR* source = static_cast(node); return ProcessConfig(source->data_); } diff --git a/src/carnot/planner/ir/otel_export_sink_ir.h b/src/carnot/planner/ir/otel_export_sink_ir.h index cced5ada202..2caad972498 100644 --- a/src/carnot/planner/ir/otel_export_sink_ir.h +++ b/src/carnot/planner/ir/otel_export_sink_ir.h @@ -127,23 +127,11 @@ struct OTelSpan { int64_t span_kind; }; -struct OTelLog { - std::vector attributes; - - ColumnIR* time_column; - ColumnIR* observed_time_column = nullptr; - ColumnIR* body_column; - - int64_t severity_number; - std::string severity_text; -}; - struct OTelData { planpb::OTelEndpointConfig endpoint_config; std::vector resource_attributes; std::vector metrics; std::vector spans; - std::vector logs; }; /** @@ -151,10 +139,9 @@ struct OTelData { * Represents a configuration to transform a DataFrame into OpenTelemetry * data. 
*/ -class OTelExportSinkIR : public SinkOperatorIR { +class OTelExportSinkIR : public OperatorIR { public: - explicit OTelExportSinkIR(int64_t id, std::string mutation_id) - : SinkOperatorIR(id, IRNodeType::kOTelExportSink, mutation_id) {} + explicit OTelExportSinkIR(int64_t id) : OperatorIR(id, IRNodeType::kOTelExportSink) {} Status Init(OperatorIR* parent, const OTelData& data) { PX_RETURN_IF_ERROR(ProcessConfig(data)); diff --git a/src/carnot/planner/ir/otel_export_sink_ir_test.cc b/src/carnot/planner/ir/otel_export_sink_ir_test.cc index e70abb21637..b508b2d8afb 100644 --- a/src/carnot/planner/ir/otel_export_sink_ir_test.cc +++ b/src/carnot/planner/ir/otel_export_sink_ir_test.cc @@ -443,85 +443,6 @@ INSTANTIATE_TEST_SUITE_P( .ConsumeValueOrDie(); }, }, - { - "logs_basic", - table_store::schema::Relation{ - {types::TIME64NS, types::STRING, types::STRING}, - {"start_time", "attribute_str", "log_message"}, - {types::ST_NONE, types::ST_NONE, types::ST_NONE}}, - R"pb( - endpoint_config {} - resource {} - logs { - attributes { - name: "service.name" - column { - column_type: STRING - column_index: 1 - } - } - time_column_index: 0 - observed_time_column_index: -1 - severity_number: 4 - severity_text: "INFO" - body_column_index: 2 - } - )pb", - [](IR* graph, OperatorIR* parent, table_store::schema::Relation* relation) { - OTelData data; - - auto& log = data.logs.emplace_back(); - log.time_column = CreateTypedColumn(graph, "start_time", relation); - log.attributes.push_back( - {"service.name", CreateTypedColumn(graph, "attribute_str", relation), ""}); - log.severity_number = 4; - log.severity_text = "INFO"; - log.body_column = CreateTypedColumn(graph, "log_message", relation); - - return graph->CreateNode(parent->ast(), parent, data) - .ConsumeValueOrDie(); - }, - }, - { - "logs_with_observed_time_col", - table_store::schema::Relation{ - {types::TIME64NS, types::TIME64NS, types::STRING, types::STRING}, - {"start_time", "observed_time", "attribute_str", "log_message"}, - 
{types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_NONE}}, - R"pb( - endpoint_config {} - resource {} - logs { - attributes { - name: "service.name" - column { - column_type: STRING - column_index: 2 - } - } - time_column_index: 0 - observed_time_column_index: 1 - severity_number: 4 - severity_text: "INFO" - body_column_index: 3 - } - )pb", - [](IR* graph, OperatorIR* parent, table_store::schema::Relation* relation) { - OTelData data; - - auto& log = data.logs.emplace_back(); - log.time_column = CreateTypedColumn(graph, "start_time", relation); - log.observed_time_column = CreateTypedColumn(graph, "observed_time", relation); - log.attributes.push_back( - {"service.name", CreateTypedColumn(graph, "attribute_str", relation), ""}); - log.severity_number = 4; - log.severity_text = "INFO"; - log.body_column = CreateTypedColumn(graph, "log_message", relation); - - return graph->CreateNode(parent->ast(), parent, data) - .ConsumeValueOrDie(); - }, - }, { "string_value_attributes", table_store::schema::Relation{{types::TIME64NS, types::INT64}, @@ -636,33 +557,6 @@ OTelExportSinkIR* CreateSpanWithNameString(IR* graph, OperatorIR* parent, return graph->CreateNode(parent->ast(), parent, data).ConsumeValueOrDie(); } -OTelExportSinkIR* CreateLog(IR* graph, OperatorIR* parent, - table_store::schema::Relation* relation) { - OTelData data; - - auto& log = data.logs.emplace_back(); - log.time_column = CreateTypedColumn(graph, "start_time", relation); - log.body_column = CreateTypedColumn(graph, "log_message", relation); - log.severity_number = 4; - log.severity_text = "INFO"; - - return graph->CreateNode(parent->ast(), parent, data).ConsumeValueOrDie(); -} - -OTelExportSinkIR* CreateLogWithObservedTime(IR* graph, OperatorIR* parent, - table_store::schema::Relation* relation) { - OTelData data; - - auto& log = data.logs.emplace_back(); - log.time_column = CreateTypedColumn(graph, "start_time", relation); - log.observed_time_column = CreateTypedColumn(graph, "observed_time", 
relation); - log.body_column = CreateTypedColumn(graph, "log_message", relation); - log.severity_number = 4; - log.severity_text = "INFO"; - - return graph->CreateNode(parent->ast(), parent, data).ConsumeValueOrDie(); -} - INSTANTIATE_TEST_SUITE_P( ErrorTests, WrongColumnTypesTest, ::testing::ValuesIn(std::vector{ @@ -829,33 +723,6 @@ INSTANTIATE_TEST_SUITE_P( .ConsumeValueOrDie(); }, }, - { - "log_time_column_wrong", - table_store::schema::Relation{ - {types::INT64, types::STRING, types::STRING}, - {"start_time", "attribute_str", "log_message"}, - {types::ST_NONE, types::ST_NONE, types::ST_NONE}}, - "Expected time column 'start_time' to be TIME64NS, received INT64", - &CreateLog, - }, - { - "log_body_column_wrong", - table_store::schema::Relation{ - {types::TIME64NS, types::STRING, types::TIME64NS}, - {"start_time", "attribute_str", "log_message"}, - {types::ST_NONE, types::ST_NONE, types::ST_NONE}}, - "Expected body column 'log_message' to be STRING, received TIME64NS", - &CreateLog, - }, - { - "log_observed_time_column_wrong", - table_store::schema::Relation{ - {types::TIME64NS, types::INT64, types::STRING, types::STRING}, - {"start_time", "observed_time", "attribute_str", "log_message"}, - {types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_NONE}}, - "Expected observed_time column 'observed_time' to be TIME64NS, received INT64", - &CreateLogWithObservedTime, - }, }), [](const ::testing::TestParamInfo& info) { return info.param.name; }); } // namespace planner diff --git a/src/carnot/planner/logical_planner_test.cc b/src/carnot/planner/logical_planner_test.cc index 5bbacb0fdf1..3ee106f50f9 100644 --- a/src/carnot/planner/logical_planner_test.cc +++ b/src/carnot/planner/logical_planner_test.cc @@ -947,12 +947,6 @@ px.export(df, px.otel.Data( name='resp_latency', value=df.resp_latency_ns, ), - px.otel.log.Log( - time=df.time_, - severity_number=px.otel.log.SEVERITY_NUMBER_INFO, - severity_text="info", - body=df.service, - ), ] )) )pxl"; diff --git 
a/src/carnot/planner/objects/BUILD.bazel b/src/carnot/planner/objects/BUILD.bazel index 09dfd062c26..060dc6a7888 100644 --- a/src/carnot/planner/objects/BUILD.bazel +++ b/src/carnot/planner/objects/BUILD.bazel @@ -37,7 +37,6 @@ pl_cc_library( "//src/carnot/planner/parser:cc_library", "//src/shared/types/typespb/wrapper:cc_library", "@com_github_opentelemetry_proto//:trace_proto_cc", - "@com_github_opentelemetry_proto//:logs_proto_cc", "@com_github_vinzenz_libpypa//:libpypa", ], ) diff --git a/src/carnot/planner/objects/dataframe.cc b/src/carnot/planner/objects/dataframe.cc index 6392863e707..8bcb2c09710 100644 --- a/src/carnot/planner/objects/dataframe.cc +++ b/src/carnot/planner/objects/dataframe.cc @@ -114,19 +114,15 @@ StatusOr> GetAsDataFrame(QLObjectPtr obj) { } StatusOr> Dataframe::Create(CompilerState* compiler_state, - OperatorIR* op, ASTVisitor* visitor, - std::optional mutation_id) { - std::shared_ptr df( - new Dataframe(compiler_state, op, op->graph(), visitor, mutation_id)); + OperatorIR* op, ASTVisitor* visitor) { + std::shared_ptr df(new Dataframe(compiler_state, op, op->graph(), visitor)); PX_RETURN_IF_ERROR(df->Init()); return df; } StatusOr> Dataframe::Create(CompilerState* compiler_state, IR* graph, - ASTVisitor* visitor, - std::optional mutation_id) { - std::shared_ptr df( - new Dataframe(compiler_state, nullptr, graph, visitor, mutation_id)); + ASTVisitor* visitor) { + std::shared_ptr df(new Dataframe(compiler_state, nullptr, graph, visitor)); PX_RETURN_IF_ERROR(df->Init()); return df; } @@ -310,7 +306,7 @@ StatusOr JoinHandler(CompilerState* compiler_state, IR* graph, Oper PX_ASSIGN_OR_RETURN(JoinIR * join_op, graph->CreateNode(ast, std::vector{op, right}, how_type, left_on_cols, right_on_cols, suffix_strs)); - return Dataframe::Create(compiler_state, join_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, join_op, visitor); } StatusOr ParseNameTuple(IR* ir, const pypa::AstPtr& ast, @@ -371,7 +367,7 @@ StatusOr 
AggHandler(CompilerState* compiler_state, IR* graph, Opera PX_ASSIGN_OR_RETURN( BlockingAggIR * agg_op, graph->CreateNode(ast, op, std::vector{}, aggregate_expressions)); - return Dataframe::Create(compiler_state, agg_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, agg_op, visitor); } StatusOr MapAssignHandler(const pypa::AstPtr& ast, const ParsedArgs&, ASTVisitor*) { @@ -388,7 +384,7 @@ StatusOr DropHandler(CompilerState* compiler_state, IR* graph, Oper PX_ASSIGN_OR_RETURN(std::vector columns, ParseAsListOfStrings(args.GetArg("columns"), "columns")); PX_ASSIGN_OR_RETURN(DropIR * drop_op, graph->CreateNode(ast, op, columns)); - return Dataframe::Create(compiler_state, drop_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, drop_op, visitor); } // Handles the head() DataFrame logic. @@ -403,7 +399,7 @@ StatusOr LimitHandler(CompilerState* compiler_state, IR* graph, Ope PX_ASSIGN_OR_RETURN(LimitIR * limit_op, graph->CreateNode(ast, op, limit_value, pem_only_val)); - return Dataframe::Create(compiler_state, limit_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, limit_op, visitor); } class SubscriptHandler { @@ -451,7 +447,7 @@ StatusOr SubscriptHandler::EvalFilter(CompilerState* compiler_state OperatorIR* op, const pypa::AstPtr& ast, ExpressionIR* expr, ASTVisitor* visitor) { PX_ASSIGN_OR_RETURN(FilterIR * filter_op, graph->CreateNode(ast, op, expr)); - return Dataframe::Create(compiler_state, filter_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, filter_op, visitor); } StatusOr SubscriptHandler::EvalColumn(IR* graph, OperatorIR*, const pypa::AstPtr&, @@ -485,7 +481,7 @@ StatusOr SubscriptHandler::EvalKeep(CompilerState* compiler_state, PX_ASSIGN_OR_RETURN(MapIR * map_op, graph->CreateNode(ast, op, keep_exprs, /* keep_input_columns */ false)); - return Dataframe::Create(compiler_state, map_op, visitor, graph->mutation_id()); + return 
Dataframe::Create(compiler_state, map_op, visitor); } // Handles the groupby() method. @@ -503,7 +499,7 @@ StatusOr GroupByHandler(CompilerState* compiler_state, IR* graph, O } PX_ASSIGN_OR_RETURN(GroupByIR * group_by_op, graph->CreateNode(ast, op, groups)); - return Dataframe::Create(compiler_state, group_by_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, group_by_op, visitor); } // Handles the append() dataframe method and creates the union node. @@ -516,7 +512,7 @@ StatusOr UnionHandler(CompilerState* compiler_state, IR* graph, Ope parents.push_back(casted); } PX_ASSIGN_OR_RETURN(UnionIR * union_op, graph->CreateNode(ast, parents)); - return Dataframe::Create(compiler_state, union_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, union_op, visitor); } // Handles the rolling() dataframe method. @@ -541,7 +537,7 @@ StatusOr RollingHandler(CompilerState* compiler_state, IR* graph, O PX_ASSIGN_OR_RETURN(RollingIR * rolling_op, graph->CreateNode(ast, op, window_col, window_size)); - return Dataframe::Create(compiler_state, rolling_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, rolling_op, visitor); } /** @@ -552,7 +548,7 @@ StatusOr StreamHandler(CompilerState* compiler_state, IR* graph, Op const pypa::AstPtr& ast, const ParsedArgs&, ASTVisitor* visitor) { PX_ASSIGN_OR_RETURN(StreamIR * stream_op, graph->CreateNode(ast, op)); - return Dataframe::Create(compiler_state, stream_op, visitor, graph->mutation_id()); + return Dataframe::Create(compiler_state, stream_op, visitor); } Status Dataframe::Init() { @@ -764,7 +760,7 @@ StatusOr> Dataframe::FromColumnAssignment(CompilerSta ColExpressionVector map_exprs{{col_name, expr}}; PX_ASSIGN_OR_RETURN(MapIR * ir_node, graph_->CreateNode(expr_node, op(), map_exprs, /*keep_input_cols*/ true)); - return Dataframe::Create(compiler_state, ir_node, ast_visitor(), graph_->mutation_id()); + return Dataframe::Create(compiler_state, ir_node, 
ast_visitor()); } } // namespace compiler diff --git a/src/carnot/planner/objects/dataframe.h b/src/carnot/planner/objects/dataframe.h index e239e382131..73f7514ba15 100644 --- a/src/carnot/planner/objects/dataframe.h +++ b/src/carnot/planner/objects/dataframe.h @@ -43,12 +43,10 @@ class Dataframe : public QLObject { /* name */ "DataFrame", /* type */ QLObjectType::kDataframe, }; - static StatusOr> Create( - CompilerState* compiler_state, OperatorIR* op, ASTVisitor* visitor, - std::optional mutation_id = std::nullopt); - static StatusOr> Create( - CompilerState* compiler_state, IR* graph, ASTVisitor* visitor, - std::optional mutation_id = std::nullopt); + static StatusOr> Create(CompilerState* compiler_state, OperatorIR* op, + ASTVisitor* visitor); + static StatusOr> Create(CompilerState* compiler_state, IR* graph, + ASTVisitor* visitor); static bool IsDataframe(const QLObjectPtr& object) { return object->type() == DataframeType.type(); } @@ -432,17 +430,7 @@ class Dataframe : public QLObject { : QLObject(DataframeType, op ? op->ast() : nullptr, visitor), compiler_state_(compiler_state), op_(op), - graph_(graph), - mutation_id_(std::nullopt) {} - - explicit Dataframe(CompilerState* compiler_state, OperatorIR* op, IR* graph, ASTVisitor* visitor, - std::optional mutation_id) - : QLObject(DataframeType, op ? 
op->ast() : nullptr, visitor), - compiler_state_(compiler_state), - op_(op), - graph_(graph), - mutation_id_(mutation_id) {} - + graph_(graph) {} StatusOr> GetAttributeImpl(const pypa::AstPtr& ast, std::string_view name) const override; @@ -453,7 +441,6 @@ class Dataframe : public QLObject { CompilerState* compiler_state_; OperatorIR* op_ = nullptr; IR* graph_ = nullptr; - std::optional mutation_id_; }; StatusOr> GetAsDataFrame(QLObjectPtr obj); diff --git a/src/carnot/planner/objects/otel.cc b/src/carnot/planner/objects/otel.cc index ee07a7f67b9..6f4b0d3410f 100644 --- a/src/carnot/planner/objects/otel.cc +++ b/src/carnot/planner/objects/otel.cc @@ -41,8 +41,6 @@ namespace carnot { namespace planner { namespace compiler { -using OTelLogRecord = px::carnot::planner::OTelLog; - StatusOr> OTelModule::Create(CompilerState* compiler_state, ASTVisitor* ast_visitor, IR* ir) { auto otel_module = std::shared_ptr(new OTelModule(ast_visitor)); @@ -62,12 +60,6 @@ StatusOr> OTelTrace::Create(ASTVisitor* ast_visitor, return otel_trace; } -StatusOr> OTelLog::Create(ASTVisitor* ast_visitor, IR* graph) { - auto otel_trace = std::shared_ptr(new OTelLog(ast_visitor, graph)); - PX_RETURN_IF_ERROR(otel_trace->Init()); - return otel_trace; -} - StatusOr> EndpointConfig::Create( ASTVisitor* ast_visitor, std::string url, std::vector attributes, bool insecure, int64_t timeout) { @@ -112,7 +104,7 @@ Status ParseEndpointConfig(CompilerState* compiler_state, const QLObjectPtr& end } StatusOr> OTelDataContainer::Create( - ASTVisitor* ast_visitor, std::variant data) { + ASTVisitor* ast_visitor, std::variant data) { return std::shared_ptr(new OTelDataContainer(ast_visitor, std::move(data))); } @@ -299,7 +291,6 @@ StatusOr OTelDataDefinition(CompilerState* compiler_state, const py std::visit(overloaded{ [&otel_data](const OTelMetric& metric) { otel_data.metrics.push_back(metric); }, [&otel_data](const OTelSpan& span) { otel_data.spans.push_back(span); }, - [&otel_data](const OTelLogRecord& log) 
{ otel_data.logs.push_back(log); }, }, container->data()); } @@ -377,9 +368,6 @@ Status OTelModule::Init(CompilerState* compiler_state, IR* ir) { PX_ASSIGN_OR_RETURN(auto trace, OTelTrace::Create(ast_visitor(), ir)); PX_RETURN_IF_ERROR(AssignAttribute("trace", trace)); - PX_ASSIGN_OR_RETURN(auto log, OTelLog::Create(ast_visitor(), ir)); - PX_RETURN_IF_ERROR(AssignAttribute("log", log)); - PX_ASSIGN_OR_RETURN( std::shared_ptr endpoint_fn, FuncObject::Create(kEndpointOpID, {"url", "headers", "insecure", "timeout"}, @@ -531,71 +519,6 @@ Status OTelTrace::Init() { return Status::OK(); } -Status OTelLog::AddSeverityNumberAttributes() { - auto ast = std::make_shared(pypa::AstType::Number); - const google::protobuf::EnumDescriptor* severity_num_desc = ::opentelemetry::proto::logs::v1::SeverityNumber_descriptor(); - if (!severity_num_desc) { - // TODO(ddelnano): return an error - } - for (int i = 0; i < severity_num_desc->value_count(); ++i) { - const google::protobuf::EnumValueDescriptor* value_desc = severity_num_desc->value(i); - PX_ASSIGN_OR_RETURN(IntIR * severity_number, - graph_->CreateNode(ast, static_cast(value_desc->number()))); - PX_ASSIGN_OR_RETURN(auto value, ExprObject::Create(severity_number, ast_visitor())); - PX_RETURN_IF_ERROR(AssignAttribute(value_desc->name(), value)); - } - PX_UNUSED(graph_); - return Status::OK(); -} - -StatusOr LogDefinition(const pypa::AstPtr& ast, const ParsedArgs& args, - ASTVisitor* visitor) { - OTelLogRecord log; - - PX_ASSIGN_OR_RETURN(log.time_column, GetArgAs(ast, args, "time")); - if (!NoneObject::IsNoneObject(args.GetArg("observed_time"))) { - PX_ASSIGN_OR_RETURN(log.observed_time_column, GetArgAs(ast, args, "observed_time")); - } - - PX_ASSIGN_OR_RETURN(log.body_column, GetArgAs(ast, args, "body")); - PX_ASSIGN_OR_RETURN(auto severity_number, GetArgAs(ast, args, "severity_number")); - log.severity_number = severity_number->val(); - - PX_ASSIGN_OR_RETURN(auto severity_text, GetArgAsString(ast, args, "severity_text")); - 
log.severity_text = severity_text; - - QLObjectPtr attributes = args.GetArg("attributes"); - if (!DictObject::IsDict(attributes)) { - return attributes->CreateError("Expected attributes to be a dictionary, received $0", - attributes->name()); - } - - PX_ASSIGN_OR_RETURN(log.attributes, ParseAttributes(static_cast(attributes.get()))); - - return OTelDataContainer::Create(visitor, std::move(log)); -} - -Status OTelLog::Init() { - // Setup methods. - PX_ASSIGN_OR_RETURN(std::shared_ptr span_fn, - FuncObject::Create(kLogOpID, - {"time", "observed_time", "body", "attributes", "severity_number", "severity_text"}, - {{"observed_time", "None"}, - {"severity_number", "px.otel.log.SEVERITY_NUMBER_INFO"}, - {"severity_text", "info"}, - {"attributes", "{}"}}, - /* has_variable_len_args */ false, - /* has_variable_len_kwargs */ false, - std::bind(&LogDefinition, std::placeholders::_1, - std::placeholders::_2, std::placeholders::_3), - ast_visitor())); - PX_RETURN_IF_ERROR(span_fn->SetDocString(kLogOpDocstring)); - AddMethod(kLogOpID, span_fn); - - PX_RETURN_IF_ERROR(AddSeverityNumberAttributes()); - return Status::OK(); -} - Status EndpointConfig::ToProto(planpb::OTelEndpointConfig* pb) { pb->set_url(url_); for (const auto& attr : attributes_) { diff --git a/src/carnot/planner/objects/otel.h b/src/carnot/planner/objects/otel.h index 2e218ee46fd..9cb96ea325c 100644 --- a/src/carnot/planner/objects/otel.h +++ b/src/carnot/planner/objects/otel.h @@ -24,7 +24,6 @@ #include #include "opentelemetry/proto/trace/v1/trace.pb.h" -#include "opentelemetry/proto/logs/v1/logs.pb.h" #include "src/carnot/planner/compiler_state/compiler_state.h" #include "src/carnot/planner/objects/funcobject.h" #include "src/carnot/planpb/plan.pb.h" @@ -96,7 +95,7 @@ class OTelModule : public QLObject { inserted. All columns from the DataFrame will be mapped to corresponding columns in the ClickHouse table. Passed as the data argument to `px.export`. 
- :topic: clickhouse + :topic: otel Args: table (string): The name of the ClickHouse table to insert data into. @@ -231,48 +230,6 @@ class OTelTrace : public QLObject { IR* graph_; }; -class OTelLog : public QLObject { - public: - inline static constexpr char kOTelLogModule[] = "log"; - static constexpr TypeDescriptor OTelLogModuleType = { - /* name */ kOTelLogModule, - /* type */ QLObjectType::kModule, - }; - static StatusOr> Create(ASTVisitor* ast_visitor, IR* graph); - - inline static constexpr char kLogOpID[] = "Log"; - inline static constexpr char kLogOpDocstring[] = R"doc( - Defines the OpenTelemetry Log type. - - Log describes how to transform a pixie DataFrame into the OpenTelemetry - Log type. - - :topic: otel - - Args: - time (Column): The column that marks the timestamp for the log, must be TIME64NS. - observed_time (Column, optional): The column that marks the XXX of the log, must be TIME64NS. - body (Column): The column that contains the log message to emit, must be STRING. - severity_number (int, optional): The OpenTelemetry SeverityNumber enum value to assign for the log, defaults to SEVERITY_NUMBER_INFO if not set. - severity_text (string, optional): The log level associated with the log, defaults to "info" if not set. - if not set. - attributes (Dict[string, Column|string], optional): A mapping of attribute name to a string or the column - that stores data about the attribute. - Returns: - OTelDataContainer: the mapping of DataFrame columns to OpenTelemetry Log fields. Can be passed - into `px.otel.Data()` as the data argument. 
- )doc"; - - protected: - OTelLog(ASTVisitor* ast_visitor, IR* graph) - : QLObject(OTelLogModuleType, ast_visitor), graph_(graph) {} - Status Init(); - Status AddSeverityNumberAttributes(); - - private: - IR* graph_; -}; - class EndpointConfig : public QLObject { public: struct ConnAttribute { @@ -309,7 +266,6 @@ class EndpointConfig : public QLObject { }; class OTelDataContainer : public QLObject { - using OTelLogRecord = px::carnot::planner::OTelLog; public: static constexpr TypeDescriptor OTelDataContainerType = { /* name */ "OTelDataContainer", @@ -317,20 +273,20 @@ class OTelDataContainer : public QLObject { }; static StatusOr> Create( - ASTVisitor* ast_visitor, std::variant data); + ASTVisitor* ast_visitor, std::variant data); static bool IsOTelDataContainer(const QLObjectPtr& obj) { return obj->type() == OTelDataContainerType.type(); } - const std::variant& data() const { return data_; } + const std::variant& data() const { return data_; } protected: - OTelDataContainer(ASTVisitor* ast_visitor, std::variant data) + OTelDataContainer(ASTVisitor* ast_visitor, std::variant data) : QLObject(OTelDataContainerType, ast_visitor), data_(std::move(data)) {} private: - std::variant data_; + std::variant data_; }; class ClickHouseRows : public QLObject { diff --git a/src/carnot/planner/objects/otel_test.cc b/src/carnot/planner/objects/otel_test.cc index c1b7fdfcac3..97e21cd663e 100644 --- a/src/carnot/planner/objects/otel_test.cc +++ b/src/carnot/planner/objects/otel_test.cc @@ -46,11 +46,9 @@ class OTelExportTest : public QLObjectTest { OTelModule::Create(compiler_state.get(), ast_visitor.get(), graph.get())); ASSERT_OK_AND_ASSIGN(auto otelmetric, OTelMetrics::Create(ast_visitor.get(), graph.get())); ASSERT_OK_AND_ASSIGN(auto oteltrace, OTelTrace::Create(ast_visitor.get(), graph.get())); - ASSERT_OK_AND_ASSIGN(auto otellog, OTelLog::Create(ast_visitor.get(), graph.get())); var_table->Add("otel", otel); var_table->Add("otelmetric", otelmetric); 
var_table->Add("oteltrace", oteltrace); - var_table->Add("otellog", otellog); } StatusOr ParseOutOTelExportIR(const std::string& otel_export_expression, @@ -471,101 +469,6 @@ otel_sink_op { parent_span_id_column_index: 7 kind_value: 2 } -})pb"}, - {"log_basic", - R"pxl( -otel.Data( - endpoint=otel.Endpoint( - url='0.0.0.0:55690', - ), - resource={ - 'service.name' : df.service, - }, - data=[ - otellog.Log( - time=df.start_time, - severity_number=4, - severity_text='info', - body=df.log_message, - ), - ] -))pxl", - table_store::schema::Relation{ - {types::TIME64NS, types::STRING, types::STRING}, - {"start_time", "service", "log_message"}, - {types::ST_NONE, types::ST_NONE, types::ST_NONE}, - }, - R"pb( -op_type: OTEL_EXPORT_SINK_OPERATOR -otel_sink_op { - endpoint_config { - url: "0.0.0.0:55690" - timeout: 5 - } - resource { - attributes { - name: "service.name" - column { - column_type: STRING - column_index: 1 - } - } - } - logs { - time_column_index: 0 - observed_time_column_index: -1 - body_column_index: 2 - severity_number: 4 - severity_text: "info" - } -})pb"}, - {"log_with_observed_time", - R"pxl( -otel.Data( - endpoint=otel.Endpoint( - url='0.0.0.0:55690', - ), - resource={ - 'service.name' : df.service, - }, - data=[ - otellog.Log( - time=df.time_, - observed_time=df.end_time, - severity_number=4, - severity_text='info', - body=df.log_message, - ), - ] -))pxl", - table_store::schema::Relation{ - {types::TIME64NS, types::TIME64NS, types::STRING, types::STRING}, - {"time_", "end_time", "service", "log_message"}, - {types::ST_NONE, types::ST_NONE, types::ST_NONE, types::ST_NONE}, - }, - R"pb( -op_type: OTEL_EXPORT_SINK_OPERATOR -otel_sink_op { - endpoint_config { - url: "0.0.0.0:55690" - timeout: 5 - } - resource { - attributes { - name: "service.name" - column { - column_type: STRING - column_index: 2 - } - } - } - logs { - time_column_index: 0 - observed_time_column_index: 1 - body_column_index: 3 - severity_number: 4 - severity_text: "info" - } })pb"}, 
{"all_attribute_types", R"pxl( diff --git a/src/carnot/planner/plannerpb/BUILD.bazel b/src/carnot/planner/plannerpb/BUILD.bazel index 8fb5c37e0e4..4b73065c498 100644 --- a/src/carnot/planner/plannerpb/BUILD.bazel +++ b/src/carnot/planner/plannerpb/BUILD.bazel @@ -28,7 +28,6 @@ pl_proto_library( deps = [ "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", - "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/carnot/planpb:plan_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/types/typespb:types_pl_proto", @@ -43,7 +42,6 @@ pl_cc_proto_library( deps = [ "//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", - "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/carnot/planpb:plan_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/types/typespb/wrapper:cc_library", @@ -58,7 +56,6 @@ pl_go_proto_library( deps = [ "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", - "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/carnot/planpb:plan_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/types/typespb:types_pl_go_proto", diff --git a/src/carnot/planner/plannerpb/service.pb.go b/src/carnot/planner/plannerpb/service.pb.go index 71eda5ae84a..c7a23641ce0 100755 --- a/src/carnot/planner/plannerpb/service.pb.go +++ b/src/carnot/planner/plannerpb/service.pb.go @@ -17,7 +17,6 @@ import ( math_bits "math/bits" distributedpb "px.dev/pixie/src/carnot/planner/distributedpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" - ir "px.dev/pixie/src/carnot/planner/file_source/ir" statuspb "px.dev/pixie/src/common/base/statuspb" reflect "reflect" strings "strings" @@ -147,6 +146,7 @@ func (m 
*FuncToExecute_ArgValue) GetValue() string { type Configs struct { OTelEndpointConfig *Configs_OTelEndpointConfig `protobuf:"bytes,1,opt,name=otel_endpoint_config,json=otelEndpointConfig,proto3" json:"otel_endpoint_config,omitempty"` PluginConfig *Configs_PluginConfig `protobuf:"bytes,2,opt,name=plugin_config,json=pluginConfig,proto3" json:"plugin_config,omitempty"` + ClickhouseConfig *Configs_ClickHouseConfig `protobuf:"bytes,3,opt,name=clickhouse_config,json=clickhouseConfig,proto3" json:"clickhouse_config,omitempty"` } func (m *Configs) Reset() { *m = Configs{} } @@ -195,6 +195,13 @@ func (m *Configs) GetPluginConfig() *Configs_PluginConfig { return nil } +func (m *Configs) GetClickhouseConfig() *Configs_ClickHouseConfig { + if m != nil { + return m.ClickhouseConfig + } + return nil +} + type Configs_OTelEndpointConfig struct { URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -313,6 +320,89 @@ func (m *Configs_PluginConfig) GetEndTimeNs() int64 { return 0 } +type Configs_ClickHouseConfig struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"` + Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"` +} + +func (m *Configs_ClickHouseConfig) Reset() { *m = Configs_ClickHouseConfig{} } +func (*Configs_ClickHouseConfig) ProtoMessage() {} +func (*Configs_ClickHouseConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_710b3465b5cdfdeb, []int{1, 2} 
+} +func (m *Configs_ClickHouseConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Configs_ClickHouseConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Configs_ClickHouseConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Configs_ClickHouseConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configs_ClickHouseConfig.Merge(m, src) +} +func (m *Configs_ClickHouseConfig) XXX_Size() int { + return m.Size() +} +func (m *Configs_ClickHouseConfig) XXX_DiscardUnknown() { + xxx_messageInfo_Configs_ClickHouseConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_Configs_ClickHouseConfig proto.InternalMessageInfo + +func (m *Configs_ClickHouseConfig) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *Configs_ClickHouseConfig) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Configs_ClickHouseConfig) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Configs_ClickHouseConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *Configs_ClickHouseConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *Configs_ClickHouseConfig) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + type QueryRequest struct { LogicalPlannerState *distributedpb.LogicalPlannerState `protobuf:"bytes,5,opt,name=logical_planner_state,json=logicalPlannerState,proto3" json:"logical_planner_state,omitempty"` QueryStr string `protobuf:"bytes,1,opt,name=query_str,json=queryStr,proto3" json:"query_str,omitempty"` @@ -600,63 +690,18 @@ func (m *ConfigUpdate) GetAgentPodName() string { return "" } -type DeleteFileSource struct { - GlobPattern string 
`protobuf:"bytes,1,opt,name=glob_pattern,json=globPattern,proto3" json:"glob_pattern,omitempty"` -} - -func (m *DeleteFileSource) Reset() { *m = DeleteFileSource{} } -func (*DeleteFileSource) ProtoMessage() {} -func (*DeleteFileSource) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{7} -} -func (m *DeleteFileSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteFileSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteFileSource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteFileSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteFileSource.Merge(m, src) -} -func (m *DeleteFileSource) XXX_Size() int { - return m.Size() -} -func (m *DeleteFileSource) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteFileSource.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteFileSource proto.InternalMessageInfo - -func (m *DeleteFileSource) GetGlobPattern() string { - if m != nil { - return m.GlobPattern - } - return "" -} - type CompileMutation struct { // Types that are valid to be assigned to Mutation: // *CompileMutation_Trace // *CompileMutation_DeleteTracepoint // *CompileMutation_ConfigUpdate - // *CompileMutation_FileSource - // *CompileMutation_DeleteFileSource Mutation isCompileMutation_Mutation `protobuf_oneof:"mutation"` } func (m *CompileMutation) Reset() { *m = CompileMutation{} } func (*CompileMutation) ProtoMessage() {} func (*CompileMutation) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{8} + return fileDescriptor_710b3465b5cdfdeb, []int{7} } func (m *CompileMutation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -701,18 +746,10 @@ type CompileMutation_DeleteTracepoint struct { type CompileMutation_ConfigUpdate struct { ConfigUpdate *ConfigUpdate 
`protobuf:"bytes,4,opt,name=config_update,json=configUpdate,proto3,oneof" json:"config_update,omitempty"` } -type CompileMutation_FileSource struct { - FileSource *ir.FileSourceDeployment `protobuf:"bytes,5,opt,name=file_source,json=fileSource,proto3,oneof" json:"file_source,omitempty"` -} -type CompileMutation_DeleteFileSource struct { - DeleteFileSource *DeleteFileSource `protobuf:"bytes,6,opt,name=delete_file_source,json=deleteFileSource,proto3,oneof" json:"delete_file_source,omitempty"` -} func (*CompileMutation_Trace) isCompileMutation_Mutation() {} func (*CompileMutation_DeleteTracepoint) isCompileMutation_Mutation() {} func (*CompileMutation_ConfigUpdate) isCompileMutation_Mutation() {} -func (*CompileMutation_FileSource) isCompileMutation_Mutation() {} -func (*CompileMutation_DeleteFileSource) isCompileMutation_Mutation() {} func (m *CompileMutation) GetMutation() isCompileMutation_Mutation { if m != nil { @@ -742,28 +779,12 @@ func (m *CompileMutation) GetConfigUpdate() *ConfigUpdate { return nil } -func (m *CompileMutation) GetFileSource() *ir.FileSourceDeployment { - if x, ok := m.GetMutation().(*CompileMutation_FileSource); ok { - return x.FileSource - } - return nil -} - -func (m *CompileMutation) GetDeleteFileSource() *DeleteFileSource { - if x, ok := m.GetMutation().(*CompileMutation_DeleteFileSource); ok { - return x.DeleteFileSource - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. 
func (*CompileMutation) XXX_OneofWrappers() []interface{} { return []interface{}{ (*CompileMutation_Trace)(nil), (*CompileMutation_DeleteTracepoint)(nil), (*CompileMutation_ConfigUpdate)(nil), - (*CompileMutation_FileSource)(nil), - (*CompileMutation_DeleteFileSource)(nil), } } @@ -775,7 +796,7 @@ type CompileMutationsResponse struct { func (m *CompileMutationsResponse) Reset() { *m = CompileMutationsResponse{} } func (*CompileMutationsResponse) ProtoMessage() {} func (*CompileMutationsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{9} + return fileDescriptor_710b3465b5cdfdeb, []int{8} } func (m *CompileMutationsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -826,7 +847,7 @@ type GenerateOTelScriptRequest struct { func (m *GenerateOTelScriptRequest) Reset() { *m = GenerateOTelScriptRequest{} } func (*GenerateOTelScriptRequest) ProtoMessage() {} func (*GenerateOTelScriptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{10} + return fileDescriptor_710b3465b5cdfdeb, []int{9} } func (m *GenerateOTelScriptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -877,7 +898,7 @@ type GenerateOTelScriptResponse struct { func (m *GenerateOTelScriptResponse) Reset() { *m = GenerateOTelScriptResponse{} } func (*GenerateOTelScriptResponse) ProtoMessage() {} func (*GenerateOTelScriptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_710b3465b5cdfdeb, []int{11} + return fileDescriptor_710b3465b5cdfdeb, []int{10} } func (m *GenerateOTelScriptResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -927,12 +948,12 @@ func init() { proto.RegisterType((*Configs_OTelEndpointConfig)(nil), "px.carnot.planner.plannerpb.Configs.OTelEndpointConfig") proto.RegisterMapType((map[string]string)(nil), "px.carnot.planner.plannerpb.Configs.OTelEndpointConfig.HeadersEntry") proto.RegisterType((*Configs_PluginConfig)(nil), 
"px.carnot.planner.plannerpb.Configs.PluginConfig") + proto.RegisterType((*Configs_ClickHouseConfig)(nil), "px.carnot.planner.plannerpb.Configs.ClickHouseConfig") proto.RegisterType((*QueryRequest)(nil), "px.carnot.planner.plannerpb.QueryRequest") proto.RegisterType((*QueryResponse)(nil), "px.carnot.planner.plannerpb.QueryResponse") proto.RegisterType((*CompileMutationsRequest)(nil), "px.carnot.planner.plannerpb.CompileMutationsRequest") proto.RegisterType((*DeleteTracepoint)(nil), "px.carnot.planner.plannerpb.DeleteTracepoint") proto.RegisterType((*ConfigUpdate)(nil), "px.carnot.planner.plannerpb.ConfigUpdate") - proto.RegisterType((*DeleteFileSource)(nil), "px.carnot.planner.plannerpb.DeleteFileSource") proto.RegisterType((*CompileMutation)(nil), "px.carnot.planner.plannerpb.CompileMutation") proto.RegisterType((*CompileMutationsResponse)(nil), "px.carnot.planner.plannerpb.CompileMutationsResponse") proto.RegisterType((*GenerateOTelScriptRequest)(nil), "px.carnot.planner.plannerpb.GenerateOTelScriptRequest") @@ -944,82 +965,83 @@ func init() { } var fileDescriptor_710b3465b5cdfdeb = []byte{ - // 1191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xda, 0x69, 0x63, 0x3f, 0x3b, 0x25, 0x9d, 0x16, 0x70, 0x5d, 0xb1, 0x2d, 0xab, 0x82, - 0x4a, 0x81, 0x35, 0xa4, 0xff, 0x50, 0x25, 0x40, 0xb8, 0x69, 0x09, 0x55, 0x29, 0x66, 0x93, 0x56, - 0xa2, 0x2a, 0xac, 0xd6, 0xeb, 0x17, 0x77, 0xc5, 0x7a, 0x76, 0x3b, 0x3b, 0x5b, 0x39, 0x5c, 0x68, - 0x91, 0xb8, 0x23, 0xf1, 0x15, 0x10, 0x42, 0xe2, 0x33, 0x70, 0xe7, 0x98, 0x63, 0x4f, 0x11, 0x71, - 0x24, 0xc4, 0xb1, 0x1f, 0x01, 0xcd, 0x9f, 0x8d, 0xd7, 0x89, 0x9b, 0x38, 0x11, 0x47, 0x4e, 0x99, - 0x79, 0xf3, 0xde, 0xef, 0xbd, 0xfd, 0xfd, 0xe6, 0xcd, 0x73, 0xe0, 0x42, 0xc2, 0xfc, 0xa6, 0xef, - 0x31, 0x1a, 0xf1, 0x66, 0x1c, 0x7a, 0x94, 0x22, 0xcb, 0xfe, 0xc6, 0x9d, 0x66, 0x82, 0xec, 0x71, - 0xe0, 0xa3, 0x1d, 0xb3, 0x88, 0x47, 0xe4, 0x74, 0x3c, 
0xb0, 0x95, 0xab, 0xad, 0x5d, 0xec, 0x6d, - 0xd7, 0xc6, 0x87, 0x13, 0x80, 0xba, 0x6b, 0xd4, 0xeb, 0x07, 0xbe, 0xcb, 0x99, 0xe7, 0x07, 0xb4, - 0xd7, 0x0c, 0x58, 0x33, 0x8c, 0x7a, 0x81, 0xef, 0x85, 0x71, 0x27, 0x5b, 0x29, 0xec, 0x46, 0x73, - 0x42, 0xf8, 0x6a, 0x10, 0xa2, 0x9b, 0x44, 0x29, 0xf3, 0x31, 0x17, 0xaa, 0x03, 0xde, 0x90, 0x01, - 0x51, 0xbf, 0x1f, 0xd1, 0x66, 0xc7, 0x4b, 0xb0, 0x99, 0x70, 0x8f, 0xa7, 0x89, 0x28, 0x5a, 0x2e, - 0xb4, 0xdb, 0xc9, 0x5e, 0xd4, 0x8b, 0xe4, 0xb2, 0x29, 0x56, 0xda, 0x7a, 0x75, 0x52, 0xb1, 0x41, - 0xc2, 0x59, 0xd0, 0x49, 0x39, 0x76, 0xe3, 0x4e, 0x7e, 0xe7, 0x0a, 0x0f, 0x15, 0x68, 0xfd, 0x6d, - 0xc0, 0xdc, 0xcd, 0x94, 0xfa, 0x2b, 0xd1, 0x8d, 0x01, 0xfa, 0x29, 0x47, 0x72, 0x1a, 0x2a, 0xab, - 0x29, 0xf5, 0x5d, 0xea, 0xf5, 0xb1, 0x6e, 0x9c, 0x35, 0xce, 0x57, 0x9c, 0xb2, 0x30, 0xdc, 0xf1, - 0xfa, 0x48, 0x1c, 0x00, 0x8f, 0xf5, 0xdc, 0xc7, 0x5e, 0x98, 0x62, 0x52, 0x2f, 0x9e, 0x2d, 0x9d, - 0xaf, 0x2e, 0x5c, 0xb4, 0xf7, 0xa0, 0xd1, 0x1e, 0x03, 0xb7, 0x3f, 0x61, 0xbd, 0x7b, 0x22, 0xd6, - 0xa9, 0x78, 0x7a, 0x95, 0x10, 0x1b, 0x4e, 0x44, 0x29, 0x8f, 0x53, 0xee, 0x72, 0xaf, 0x13, 0xa2, - 0x1b, 0x33, 0x5c, 0x0d, 0x06, 0xf5, 0x92, 0x4c, 0x7d, 0x5c, 0x1d, 0xad, 0x88, 0x93, 0xb6, 0x3c, - 0x68, 0x5c, 0x82, 0x72, 0x06, 0x43, 0x08, 0xcc, 0xe4, 0xea, 0x94, 0x6b, 0x72, 0x12, 0x8e, 0xc8, - 0xfa, 0xea, 0x45, 0x69, 0x54, 0x1b, 0xeb, 0x8f, 0x19, 0x98, 0xbd, 0x1e, 0xd1, 0xd5, 0xa0, 0x97, - 0x90, 0xa7, 0x06, 0x9c, 0x8c, 0x38, 0x86, 0x2e, 0xd2, 0x6e, 0x1c, 0x05, 0x94, 0xbb, 0xbe, 0x3c, - 0x91, 0x30, 0xd5, 0x85, 0xab, 0x7b, 0x7e, 0x90, 0x06, 0xb1, 0xbf, 0x58, 0xc1, 0xf0, 0x86, 0x8e, - 0x57, 0xb6, 0xd6, 0x2b, 0xc3, 0x8d, 0x33, 0x64, 0xb7, 0xdd, 0x21, 0x22, 0xd9, 0xb8, 0x8d, 0xdc, - 0x83, 0xb9, 0x38, 0x4c, 0x7b, 0x01, 0xcd, 0x72, 0x17, 0x65, 0xee, 0xf7, 0xa7, 0xca, 0xdd, 0x96, - 0x91, 0x1a, 0xbd, 0x16, 0xe7, 0x76, 0x8d, 0xa7, 0x45, 0x98, 0x50, 0x02, 0x39, 0x05, 0xa5, 0x94, - 0x85, 0x8a, 0xa7, 0xd6, 0xec, 0x70, 0xe3, 0x4c, 0xe9, 0xae, 0x73, 0xdb, 0x11, 0x36, 0xf2, 
0x0d, - 0xcc, 0x3e, 0x44, 0xaf, 0x8b, 0x2c, 0x13, 0x74, 0xf1, 0x90, 0xdf, 0x6f, 0x2f, 0x29, 0x98, 0x1b, - 0x94, 0xb3, 0x35, 0x27, 0x03, 0x25, 0x0d, 0x28, 0x07, 0x34, 0x41, 0x3f, 0x65, 0x28, 0x45, 0x2d, - 0x3b, 0xdb, 0x7b, 0x52, 0x87, 0x59, 0x1e, 0xf4, 0x31, 0x4a, 0x79, 0x7d, 0xe6, 0xac, 0x71, 0xbe, - 0xe4, 0x64, 0xdb, 0xc6, 0x35, 0xa8, 0xe5, 0xe1, 0xc8, 0x3c, 0x94, 0xbe, 0xc5, 0x35, 0x2d, 0xb4, - 0x58, 0x4e, 0xd6, 0xf9, 0x5a, 0xf1, 0x03, 0xa3, 0xe1, 0x40, 0x2d, 0xcf, 0x10, 0xb1, 0x60, 0x2e, - 0xe1, 0x1e, 0xe3, 0xae, 0x00, 0x77, 0x69, 0x22, 0x51, 0x4a, 0x4e, 0x55, 0x1a, 0x57, 0x82, 0x3e, - 0xde, 0x49, 0x88, 0x09, 0x55, 0xa4, 0xdd, 0x6d, 0x8f, 0xa2, 0xf4, 0xa8, 0x20, 0xed, 0xaa, 0x73, - 0xeb, 0xd7, 0x22, 0xd4, 0xbe, 0x4c, 0x91, 0xad, 0x39, 0xf8, 0x28, 0xc5, 0x84, 0x93, 0x87, 0xf0, - 0xb2, 0x6e, 0x60, 0x57, 0x93, 0xe3, 0x8a, 0x46, 0xc5, 0xfa, 0x11, 0x29, 0xe4, 0xa5, 0x09, 0x24, - 0x8e, 0x75, 0xa4, 0x7d, 0x5b, 0x45, 0xb7, 0xd5, 0xe1, 0xb2, 0x88, 0x75, 0x4e, 0x84, 0xbb, 0x8d, - 0xa2, 0x23, 0x1f, 0x89, 0xcc, 0x6e, 0xc2, 0x59, 0xd6, 0x91, 0xd2, 0xb0, 0xcc, 0x19, 0xf9, 0x0c, - 0x00, 0x07, 0xe8, 0xbb, 0xa2, 0x45, 0x93, 0x7a, 0x49, 0x0a, 0x78, 0x61, 0xfa, 0x8e, 0x74, 0x2a, - 0x22, 0x5a, 0x98, 0x12, 0xf2, 0x11, 0xcc, 0xaa, 0xbb, 0x98, 0x48, 0x31, 0xaa, 0x0b, 0xe7, 0xa6, - 0xb9, 0x08, 0x4e, 0x16, 0x74, 0x6b, 0xa6, 0x5c, 0x9c, 0x2f, 0x59, 0x3f, 0x18, 0x30, 0xa7, 0x89, - 0x4a, 0xe2, 0x88, 0x26, 0x48, 0xde, 0x86, 0xa3, 0xea, 0x09, 0xd3, 0xfd, 0x75, 0x42, 0xc0, 0x66, - 0xaf, 0x9b, 0xbd, 0x2c, 0x17, 0x8e, 0x76, 0x21, 0x8b, 0x30, 0x23, 0x52, 0xe8, 0x76, 0x78, 0x6f, - 0x5f, 0x16, 0x17, 0x47, 0x3b, 0x41, 0x9a, 0x23, 0xa3, 0xad, 0xdf, 0x8b, 0xf0, 0xea, 0xf5, 0xa8, - 0x1f, 0x07, 0x21, 0x7e, 0x9e, 0x72, 0x8f, 0x07, 0x11, 0x4d, 0xfe, 0x17, 0xee, 0x05, 0xc2, 0x59, - 0x6f, 0xc2, 0xfc, 0x22, 0x86, 0xc8, 0x71, 0x85, 0x79, 0x3e, 0xca, 0x8e, 0x9e, 0xf4, 0xb2, 0x5a, - 0x0f, 0xa0, 0xa6, 0x62, 0xef, 0xc6, 0x5d, 0xf1, 0x7d, 0x53, 0xf6, 0x24, 0x39, 0x07, 0xc7, 0xbc, - 0x1e, 0x52, 0xee, 0xc6, 0x51, 
0x57, 0xcd, 0x15, 0xf5, 0xb8, 0xd7, 0xa4, 0xb5, 0x1d, 0x75, 0xc5, - 0x6c, 0xb1, 0x2e, 0x67, 0x55, 0xdc, 0x0c, 0x42, 0x5c, 0x96, 0x53, 0x92, 0xbc, 0x0e, 0xb5, 0x5e, - 0x18, 0x75, 0xdc, 0xd8, 0xe3, 0x1c, 0x19, 0xd5, 0xa9, 0xaa, 0xc2, 0xd6, 0x56, 0x26, 0x6b, 0xab, - 0x04, 0x2f, 0xed, 0x90, 0x9a, 0xdc, 0x87, 0x23, 0x62, 0x44, 0xa3, 0xbe, 0x45, 0xad, 0x49, 0x92, - 0x8e, 0x8f, 0x72, 0x3b, 0x60, 0x76, 0x36, 0x8f, 0x47, 0x2c, 0x2c, 0x62, 0x1c, 0x46, 0x6b, 0x7d, - 0xa4, 0x7c, 0xa9, 0xe0, 0x28, 0x48, 0xf2, 0x00, 0x8e, 0x77, 0x65, 0x99, 0x32, 0x54, 0xf9, 0xc9, - 0xef, 0xa9, 0x2e, 0xbc, 0xbb, 0x27, 0xed, 0x3b, 0x29, 0x5e, 0x2a, 0x38, 0xf3, 0xdd, 0x9d, 0xb4, - 0xb7, 0x61, 0x4e, 0xa9, 0xe2, 0xa6, 0x92, 0x63, 0x2d, 0xe8, 0x5b, 0x53, 0x08, 0xaa, 0x44, 0x59, - 0x2a, 0x38, 0x35, 0x3f, 0x2f, 0xd2, 0x57, 0x50, 0xcd, 0xfd, 0xee, 0xd0, 0x97, 0xfc, 0xca, 0x04, - 0xbc, 0x9c, 0x97, 0x60, 0x63, 0xa4, 0xc2, 0x18, 0x0b, 0xb0, 0x3a, 0x52, 0xe7, 0x6b, 0x20, 0x9a, - 0x8a, 0x7c, 0x86, 0xa3, 0x53, 0x73, 0x31, 0x4a, 0x31, 0xe2, 0x62, 0x64, 0x6b, 0x01, 0x94, 0xfb, - 0x5a, 0x51, 0xeb, 0x67, 0x03, 0xea, 0xbb, 0x1b, 0xfa, 0x30, 0x0f, 0xcc, 0x2d, 0xa8, 0x64, 0xa8, - 0xd9, 0xc0, 0x7b, 0x67, 0x1f, 0x76, 0xc7, 0xd2, 0x3a, 0xa3, 0x70, 0xeb, 0x17, 0x03, 0x4e, 0x7d, - 0x8a, 0x14, 0x99, 0xc7, 0x51, 0xcc, 0xc3, 0x65, 0x9f, 0x05, 0x31, 0xdf, 0xf7, 0xa1, 0x31, 0xfe, - 0xeb, 0x87, 0xe6, 0x35, 0x80, 0x78, 0x10, 0xba, 0x89, 0x4c, 0xaf, 0x7b, 0xaf, 0x12, 0x0f, 0x74, - 0x3d, 0xd6, 0x77, 0xd0, 0x98, 0x54, 0xe5, 0x61, 0xd8, 0x6b, 0x42, 0x55, 0xfe, 0x72, 0xca, 0xa7, - 0x6a, 0x1d, 0x1b, 0x6e, 0x9c, 0x81, 0x1c, 0x32, 0x08, 0x17, 0xb5, 0x5e, 0x78, 0x52, 0x82, 0x63, - 0x59, 0xad, 0xea, 0xc7, 0x37, 0x41, 0xf1, 0x8c, 0x48, 0x4e, 0xe5, 0x9c, 0x20, 0x7b, 0x5f, 0xee, - 0xfc, 0xd0, 0x6d, 0x5c, 0x98, 0xc6, 0x55, 0x7f, 0xd7, 0xf7, 0x30, 0xbf, 0xf3, 0xc6, 0x90, 0x4b, - 0x07, 0x51, 0x3a, 0x9b, 0x18, 0x8d, 0xcb, 0x07, 0x8c, 0xd2, 0x05, 0xfc, 0x68, 0x00, 0xd9, 0xcd, - 0x3b, 0xb9, 0xb2, 0x27, 0xda, 0x0b, 0xaf, 0x53, 0xe3, 0xea, 0x81, 
0xe3, 0x54, 0x1d, 0xad, 0x8f, - 0xd7, 0x37, 0xcd, 0xc2, 0xb3, 0x4d, 0xb3, 0xf0, 0x7c, 0xd3, 0x34, 0x9e, 0x0c, 0x4d, 0xe3, 0xb7, - 0xa1, 0x69, 0xfc, 0x39, 0x34, 0x8d, 0xf5, 0xa1, 0x69, 0xfc, 0x35, 0x34, 0x8d, 0x7f, 0x86, 0x66, - 0xe1, 0xf9, 0xd0, 0x34, 0x7e, 0xda, 0x32, 0x0b, 0xeb, 0x5b, 0x66, 0xe1, 0xd9, 0x96, 0x59, 0xb8, - 0x5f, 0xd9, 0xc6, 0xee, 0x1c, 0x95, 0xff, 0x2b, 0x5c, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x30, - 0x6b, 0x7a, 0x93, 0x5c, 0x0d, 0x00, 0x00, + // 1205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x5b, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xda, 0xb9, 0xd8, 0xc7, 0x4e, 0xff, 0xee, 0xb4, 0x7f, 0x70, 0xb7, 0x62, 0x5b, 0xad, + 0x0a, 0x0a, 0x01, 0xd6, 0x90, 0xa6, 0x04, 0x55, 0x02, 0x84, 0x93, 0x40, 0xa8, 0x4a, 0x09, 0x9b, + 0xb4, 0x0f, 0x55, 0xc5, 0x6a, 0xbd, 0x3b, 0x71, 0x56, 0x5d, 0xef, 0x6c, 0x67, 0x66, 0x8b, 0xc3, + 0x0b, 0x2d, 0x12, 0xef, 0x48, 0x7c, 0x05, 0x84, 0xb8, 0x7c, 0x11, 0x9e, 0x50, 0x1e, 0xfb, 0x14, + 0x11, 0x47, 0x42, 0x3c, 0xf6, 0x23, 0xa0, 0xb9, 0x6c, 0xe2, 0x24, 0x6e, 0xe2, 0x44, 0x3c, 0xf2, + 0xe4, 0x73, 0xfd, 0x9d, 0x33, 0xe7, 0x9c, 0x39, 0xb3, 0x86, 0x19, 0x46, 0x83, 0x66, 0xe0, 0xd3, + 0x84, 0xf0, 0x66, 0x1a, 0xfb, 0x49, 0x82, 0x69, 0xfe, 0x9b, 0xb6, 0x9b, 0x0c, 0xd3, 0xc7, 0x51, + 0x80, 0x9d, 0x94, 0x12, 0x4e, 0xd0, 0xe5, 0xb4, 0xe7, 0x28, 0x53, 0x47, 0x9b, 0x38, 0x7b, 0xa6, + 0xe6, 0xfb, 0x43, 0x80, 0xc2, 0xcd, 0xc4, 0xef, 0x46, 0x81, 0xc7, 0xa9, 0x1f, 0x44, 0x49, 0xa7, + 0x19, 0xd1, 0x66, 0x4c, 0x3a, 0x51, 0xe0, 0xc7, 0x69, 0x3b, 0xa7, 0x14, 0xb6, 0xf9, 0xaa, 0x74, + 0x27, 0xdd, 0x2e, 0x49, 0x9a, 0x6d, 0x9f, 0xe1, 0x26, 0xe3, 0x3e, 0xcf, 0x98, 0xc8, 0x41, 0x12, + 0xda, 0xec, 0x62, 0x87, 0x74, 0x88, 0x24, 0x9b, 0x82, 0xd2, 0xd2, 0xf9, 0x61, 0xb1, 0x23, 0xc6, + 0x69, 0xd4, 0xce, 0x38, 0x0e, 0xd3, 0xf6, 0x20, 0xe7, 0x09, 0x0b, 0xe5, 0x68, 0xff, 0x65, 0xc0, + 0xd4, 0xc7, 0x59, 0x12, 0xac, 0x91, 0xa5, 0x1e, 0x0e, 0x32, 0x8e, 0xd1, 0x65, 0xa8, 0xac, 0x67, + 0x49, 0xe0, 
0x25, 0x7e, 0x17, 0x37, 0x8c, 0xab, 0xc6, 0x74, 0xc5, 0x2d, 0x0b, 0xc1, 0x1d, 0xbf, + 0x8b, 0x91, 0x0b, 0xe0, 0xd3, 0x8e, 0xf7, 0xd8, 0x8f, 0x33, 0xcc, 0x1a, 0xc5, 0xab, 0xa5, 0xe9, + 0xea, 0xec, 0x75, 0xe7, 0x98, 0xaa, 0x38, 0x07, 0xc0, 0x9d, 0x8f, 0x68, 0xe7, 0x9e, 0xf0, 0x75, + 0x2b, 0xbe, 0xa6, 0x18, 0x72, 0xe0, 0x02, 0xc9, 0x78, 0x9a, 0x71, 0x8f, 0xfb, 0xed, 0x18, 0x7b, + 0x29, 0xc5, 0xeb, 0x51, 0xaf, 0x51, 0x92, 0xa1, 0xcf, 0x2b, 0xd5, 0x9a, 0xd0, 0xac, 0x48, 0x85, + 0x39, 0x07, 0xe5, 0x1c, 0x06, 0x21, 0x18, 0x1b, 0xc8, 0x53, 0xd2, 0xe8, 0x22, 0x8c, 0xcb, 0xfc, + 0x1a, 0x45, 0x29, 0x54, 0x8c, 0xfd, 0xc7, 0x04, 0x4c, 0x2e, 0x90, 0x64, 0x3d, 0xea, 0x30, 0xf4, + 0xd4, 0x80, 0x8b, 0x84, 0xe3, 0xd8, 0xc3, 0x49, 0x98, 0x92, 0x28, 0xe1, 0x5e, 0x20, 0x35, 0x12, + 0xa6, 0x3a, 0x3b, 0x7f, 0xec, 0x81, 0x34, 0x88, 0xf3, 0xf9, 0x1a, 0x8e, 0x97, 0xb4, 0xbf, 0x92, + 0xb5, 0x5e, 0xea, 0x6f, 0x5f, 0x41, 0x47, 0xe5, 0x2e, 0x12, 0xc1, 0x0e, 0xca, 0xd0, 0x3d, 0x98, + 0x4a, 0xe3, 0xac, 0x13, 0x25, 0x79, 0xec, 0xa2, 0x8c, 0xfd, 0xce, 0x48, 0xb1, 0x57, 0xa4, 0xa7, + 0x46, 0xaf, 0xa5, 0x03, 0x1c, 0x6a, 0xc3, 0xf9, 0x20, 0x8e, 0x82, 0x87, 0x1b, 0x24, 0x63, 0x38, + 0xc7, 0x2e, 0x49, 0xec, 0x1b, 0x23, 0x61, 0x2f, 0x08, 0xef, 0x65, 0xe1, 0xad, 0xf1, 0xeb, 0xfb, + 0x78, 0x4a, 0x62, 0x3e, 0x2d, 0xc2, 0x90, 0x63, 0xa2, 0x4b, 0x50, 0xca, 0x68, 0xac, 0x7a, 0xd1, + 0x9a, 0xec, 0x6f, 0x5f, 0x29, 0xdd, 0x75, 0x6f, 0xbb, 0x42, 0x86, 0xbe, 0x84, 0xc9, 0x0d, 0xec, + 0x87, 0x98, 0xe6, 0x43, 0xb3, 0x78, 0xc6, 0x1a, 0x3b, 0xcb, 0x0a, 0x66, 0x29, 0xe1, 0x74, 0xd3, + 0xcd, 0x41, 0x91, 0x09, 0xe5, 0x28, 0x61, 0x38, 0xc8, 0x28, 0x96, 0x87, 0x2d, 0xbb, 0x7b, 0x3c, + 0x6a, 0xc0, 0x24, 0x8f, 0xba, 0x98, 0x64, 0xbc, 0x31, 0x76, 0xd5, 0x98, 0x2e, 0xb9, 0x39, 0x6b, + 0xde, 0x84, 0xda, 0x20, 0x1c, 0xaa, 0x43, 0xe9, 0x21, 0xde, 0xd4, 0xc3, 0x24, 0xc8, 0xe1, 0xb3, + 0x74, 0xb3, 0xf8, 0x9e, 0x61, 0xba, 0x50, 0x1b, 0xec, 0x02, 0xb2, 0x61, 0x8a, 0x71, 0x9f, 0x72, + 0x4f, 0x80, 0x7b, 0x09, 0x93, 0x28, 0x25, 0xb7, 
0x2a, 0x85, 0x6b, 0x51, 0x17, 0xdf, 0x61, 0xc8, + 0x82, 0x2a, 0x4e, 0xc2, 0x3d, 0x8b, 0xa2, 0xb4, 0xa8, 0xe0, 0x24, 0x54, 0x7a, 0xf3, 0x57, 0x03, + 0xea, 0x87, 0xcb, 0x2f, 0x8e, 0xb6, 0x41, 0x18, 0x1f, 0xbc, 0x8e, 0x39, 0x2f, 0xc6, 0x5f, 0xd0, + 0x3a, 0x3b, 0x49, 0x0b, 0x59, 0x4a, 0x28, 0x97, 0x65, 0x18, 0x77, 0x25, 0x2d, 0x30, 0x32, 0x86, + 0xa9, 0xc4, 0x18, 0x53, 0x18, 0x39, 0x2f, 0x74, 0xa9, 0xcf, 0xd8, 0x57, 0x84, 0x86, 0x8d, 0x71, + 0xa5, 0xcb, 0x79, 0xa1, 0x0b, 0x7d, 0xee, 0x8b, 0x75, 0xd4, 0x98, 0x50, 0xba, 0x9c, 0xb7, 0x7f, + 0x2a, 0x42, 0xed, 0x8b, 0x0c, 0xd3, 0x4d, 0x17, 0x3f, 0xca, 0x30, 0xe3, 0x68, 0x03, 0xfe, 0xaf, + 0x37, 0x9a, 0xa7, 0x3b, 0xe9, 0x89, 0xcd, 0x85, 0x25, 0x6a, 0x75, 0x76, 0x6e, 0x48, 0xc7, 0x0f, + 0xac, 0x28, 0xe7, 0xb6, 0xf2, 0x5e, 0x51, 0xca, 0x55, 0xe1, 0xeb, 0x5e, 0x88, 0x8f, 0x0a, 0xc5, + 0x8a, 0x7a, 0x24, 0x22, 0x7b, 0x8c, 0xd3, 0xbc, 0x26, 0x52, 0xb0, 0xca, 0x29, 0xfa, 0x14, 0x00, + 0xf7, 0x70, 0xe0, 0x89, 0x9d, 0xc5, 0x1a, 0x25, 0x39, 0x6d, 0x33, 0xa3, 0xaf, 0x28, 0xb7, 0x22, + 0xbc, 0x85, 0x88, 0xa1, 0x0f, 0x60, 0x52, 0x5d, 0x20, 0x26, 0xab, 0x56, 0x9d, 0xbd, 0x36, 0xca, + 0xd4, 0xba, 0xb9, 0xd3, 0xad, 0xb1, 0x72, 0xb1, 0x5e, 0xb2, 0xbf, 0x35, 0x60, 0x4a, 0x17, 0x8a, + 0xa5, 0x24, 0x61, 0x18, 0xbd, 0x01, 0x13, 0x6a, 0xa7, 0xeb, 0x85, 0x73, 0x41, 0xc0, 0xe6, 0xeb, + 0xde, 0x59, 0x95, 0x84, 0xab, 0x4d, 0xd0, 0x22, 0x8c, 0x89, 0x10, 0x7a, 0x3f, 0xbc, 0x7d, 0x62, + 0x15, 0x17, 0xf7, 0x39, 0x51, 0x34, 0x57, 0x7a, 0xdb, 0xbf, 0x15, 0xe1, 0xe5, 0x05, 0xd2, 0x4d, + 0xa3, 0x18, 0x7f, 0x96, 0x71, 0x9f, 0x47, 0x24, 0x61, 0xff, 0x35, 0xee, 0x05, 0x8d, 0xb3, 0x5f, + 0x83, 0xfa, 0x22, 0x8e, 0x31, 0xc7, 0x6b, 0xd4, 0x0f, 0xb0, 0x5c, 0x3f, 0xc3, 0x9e, 0x1a, 0xfb, + 0x01, 0xd4, 0x94, 0xef, 0xdd, 0x34, 0x14, 0xe7, 0x1b, 0x71, 0x81, 0xa0, 0x6b, 0x70, 0xce, 0xef, + 0xe0, 0x84, 0x7b, 0x29, 0x09, 0xd5, 0x43, 0xab, 0x5e, 0xbb, 0x9a, 0x94, 0xae, 0x90, 0x50, 0x3c, + 0xb6, 0xf6, 0x2f, 0x45, 0xf8, 0xdf, 0xa1, 0x9e, 0xa1, 0xfb, 0x30, 0x2e, 0xbe, 0x25, 
0xb0, 0x1e, + 0x87, 0xd6, 0xb0, 0xde, 0x1c, 0xfc, 0xe6, 0x70, 0x22, 0xea, 0xe4, 0x5f, 0x1a, 0xfb, 0xc7, 0x59, + 0xc4, 0x69, 0x4c, 0x36, 0xbb, 0x38, 0xe1, 0xcb, 0x05, 0x57, 0x41, 0xa2, 0x07, 0x70, 0x3e, 0x94, + 0xa7, 0x96, 0xae, 0xca, 0x4e, 0x3f, 0x1d, 0x6f, 0x1d, 0x5b, 0xbf, 0xc3, 0xb5, 0x5a, 0x2e, 0xb8, + 0xf5, 0xf0, 0x70, 0xfd, 0x56, 0x60, 0x4a, 0x95, 0xd7, 0xcb, 0x64, 0xb1, 0x74, 0x67, 0x5e, 0x1f, + 0xa1, 0x33, 0xaa, 0xba, 0xcb, 0x05, 0xb7, 0x16, 0x0c, 0xf0, 0x2d, 0x80, 0x72, 0x57, 0xd7, 0xc5, + 0xfe, 0xc1, 0x80, 0xc6, 0xd1, 0xf9, 0x3e, 0xcb, 0x7d, 0xbb, 0x05, 0x95, 0x1c, 0x35, 0x7f, 0xac, + 0xde, 0x3c, 0x21, 0xc7, 0x03, 0x61, 0xdd, 0x7d, 0x77, 0xfb, 0x47, 0x03, 0x2e, 0x7d, 0x82, 0x13, + 0x4c, 0x7d, 0x8e, 0xc5, 0x5b, 0xb6, 0x1a, 0xd0, 0x28, 0xe5, 0x27, 0xde, 0x3b, 0xe3, 0xdf, 0xbe, + 0x77, 0xaf, 0x00, 0xa4, 0xbd, 0xd8, 0x63, 0x32, 0xbc, 0x1e, 0xc5, 0x4a, 0xda, 0xd3, 0xf9, 0xd8, + 0x5f, 0x83, 0x39, 0x2c, 0xcb, 0xb3, 0x54, 0xaf, 0x09, 0x55, 0xf9, 0x65, 0x35, 0x18, 0xaa, 0x75, + 0xae, 0xbf, 0x7d, 0x05, 0x06, 0x90, 0x41, 0x98, 0x28, 0x7a, 0xf6, 0x49, 0x09, 0xce, 0xe5, 0xb9, + 0xaa, 0x6f, 0x6d, 0x84, 0xc5, 0xad, 0x92, 0x35, 0x95, 0x6b, 0x13, 0x1d, 0x3f, 0x22, 0x83, 0x6f, + 0x90, 0x39, 0x33, 0x8a, 0xa9, 0x3e, 0xd7, 0x37, 0x50, 0x3f, 0x3c, 0x31, 0x68, 0xee, 0x34, 0x9d, + 0xce, 0x17, 0xa8, 0x79, 0xe3, 0x94, 0x5e, 0x3a, 0x81, 0xef, 0x0c, 0x40, 0x47, 0xeb, 0x8e, 0xde, + 0x3d, 0x16, 0xed, 0x85, 0xe3, 0x64, 0xce, 0x9f, 0xda, 0x4f, 0xe5, 0xd1, 0xfa, 0x70, 0x6b, 0xc7, + 0x2a, 0x3c, 0xdb, 0xb1, 0x0a, 0xcf, 0x77, 0x2c, 0xe3, 0x49, 0xdf, 0x32, 0x7e, 0xee, 0x5b, 0xc6, + 0xef, 0x7d, 0xcb, 0xd8, 0xea, 0x5b, 0xc6, 0x9f, 0x7d, 0xcb, 0xf8, 0xbb, 0x6f, 0x15, 0x9e, 0xf7, + 0x2d, 0xe3, 0xfb, 0x5d, 0xab, 0xb0, 0xb5, 0x6b, 0x15, 0x9e, 0xed, 0x5a, 0x85, 0xfb, 0x95, 0x3d, + 0xec, 0xf6, 0x84, 0xfc, 0x2f, 0x71, 0xfd, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xee, 0x54, + 0x71, 0x4b, 0x0d, 0x00, 0x00, } func (this *FuncToExecute) Equal(that interface{}) bool { @@ -1109,6 +1131,9 @@ func (this 
*Configs) Equal(that interface{}) bool { if !this.PluginConfig.Equal(that1.PluginConfig) { return false } + if !this.ClickhouseConfig.Equal(that1.ClickhouseConfig) { + return false + } return true } func (this *Configs_OTelEndpointConfig) Equal(that interface{}) bool { @@ -1176,6 +1201,45 @@ func (this *Configs_PluginConfig) Equal(that interface{}) bool { } return true } +func (this *Configs_ClickHouseConfig) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Configs_ClickHouseConfig) + if !ok { + that2, ok := that.(Configs_ClickHouseConfig) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Hostname != that1.Hostname { + return false + } + if this.Host != that1.Host { + return false + } + if this.Port != that1.Port { + return false + } + if this.Username != that1.Username { + return false + } + if this.Password != that1.Password { + return false + } + if this.Database != that1.Database { + return false + } + return true +} func (this *QueryRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1333,30 +1397,6 @@ func (this *ConfigUpdate) Equal(that interface{}) bool { } return true } -func (this *DeleteFileSource) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DeleteFileSource) - if !ok { - that2, ok := that.(DeleteFileSource) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.GlobPattern != that1.GlobPattern { - return false - } - return true -} func (this *CompileMutation) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1459,54 +1499,6 @@ func (this *CompileMutation_ConfigUpdate) Equal(that interface{}) bool { } return true } -func (this *CompileMutation_FileSource) Equal(that interface{}) bool { - if that == 
nil { - return this == nil - } - - that1, ok := that.(*CompileMutation_FileSource) - if !ok { - that2, ok := that.(CompileMutation_FileSource) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.FileSource.Equal(that1.FileSource) { - return false - } - return true -} -func (this *CompileMutation_DeleteFileSource) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CompileMutation_DeleteFileSource) - if !ok { - that2, ok := that.(CompileMutation_DeleteFileSource) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.DeleteFileSource.Equal(that1.DeleteFileSource) { - return false - } - return true -} func (this *CompileMutationsResponse) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1622,7 +1614,7 @@ func (this *Configs) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&plannerpb.Configs{") if this.OTelEndpointConfig != nil { s = append(s, "OTelEndpointConfig: "+fmt.Sprintf("%#v", this.OTelEndpointConfig)+",\n") @@ -1630,6 +1622,9 @@ func (this *Configs) GoString() string { if this.PluginConfig != nil { s = append(s, "PluginConfig: "+fmt.Sprintf("%#v", this.PluginConfig)+",\n") } + if this.ClickhouseConfig != nil { + s = append(s, "ClickhouseConfig: "+fmt.Sprintf("%#v", this.ClickhouseConfig)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -1669,6 +1664,21 @@ func (this *Configs_PluginConfig) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *Configs_ClickHouseConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&plannerpb.Configs_ClickHouseConfig{") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", 
this.Hostname)+",\n") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Password: "+fmt.Sprintf("%#v", this.Password)+",\n") + s = append(s, "Database: "+fmt.Sprintf("%#v", this.Database)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *QueryRequest) GoString() string { if this == nil { return "nil" @@ -1744,21 +1754,11 @@ func (this *ConfigUpdate) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *DeleteFileSource) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&plannerpb.DeleteFileSource{") - s = append(s, "GlobPattern: "+fmt.Sprintf("%#v", this.GlobPattern)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} func (this *CompileMutation) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 7) s = append(s, "&plannerpb.CompileMutation{") if this.Mutation != nil { s = append(s, "Mutation: "+fmt.Sprintf("%#v", this.Mutation)+",\n") @@ -1790,22 +1790,6 @@ func (this *CompileMutation_ConfigUpdate) GoString() string { `ConfigUpdate:` + fmt.Sprintf("%#v", this.ConfigUpdate) + `}`}, ", ") return s } -func (this *CompileMutation_FileSource) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&plannerpb.CompileMutation_FileSource{` + - `FileSource:` + fmt.Sprintf("%#v", this.FileSource) + `}`}, ", ") - return s -} -func (this *CompileMutation_DeleteFileSource) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&plannerpb.CompileMutation_DeleteFileSource{` + - `DeleteFileSource:` + fmt.Sprintf("%#v", this.DeleteFileSource) + `}`}, ", ") - return s -} func (this *CompileMutationsResponse) GoString() string { if this == nil { return "nil" @@ -2116,6 +2100,18 @@ func (m *Configs) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ClickhouseConfig != nil { + { + size, err := m.ClickhouseConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.PluginConfig != nil { { size, err := m.PluginConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -2240,7 +2236,7 @@ func (m *Configs_PluginConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *QueryRequest) Marshal() (dAtA []byte, err error) { +func (m *Configs_ClickHouseConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2250,43 +2246,106 @@ func (m *QueryRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Configs_ClickHouseConfig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Configs_ClickHouseConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.LogicalPlannerState != nil { - { - size, err := m.LogicalPlannerState.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } + if len(m.Database) > 0 { + i -= len(m.Database) + copy(dAtA[i:], m.Database) + i = encodeVarintService(dAtA, i, uint64(len(m.Database))) + i-- + dAtA[i] = 0x32 + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintService(dAtA, i, uint64(len(m.Password))) i-- dAtA[i] = 0x2a } - if m.Configs != nil { - { - size, err := m.Configs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } + if len(m.Username) > 0 { + i 
-= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintService(dAtA, i, uint64(len(m.Username))) i-- dAtA[i] = 0x22 } - if len(m.ExecFuncs) > 0 { - for iNdEx := len(m.ExecFuncs) - 1; iNdEx >= 0; iNdEx-- { - { + if m.Port != 0 { + i = encodeVarintService(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintService(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintService(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LogicalPlannerState != nil { + { + size, err := m.LogicalPlannerState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Configs != nil { + { + size, err := m.Configs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.ExecFuncs) > 0 { + for iNdEx := len(m.ExecFuncs) - 1; iNdEx >= 0; iNdEx-- { + { size, err := m.ExecFuncs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err @@ -2497,36 +2556,6 @@ func (m *ConfigUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DeleteFileSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteFileSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteFileSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.GlobPattern) > 0 { - i -= len(m.GlobPattern) - copy(dAtA[i:], m.GlobPattern) - i = encodeVarintService(dAtA, i, uint64(len(m.GlobPattern))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *CompileMutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2622,48 +2651,6 @@ func (m *CompileMutation_ConfigUpdate) MarshalToSizedBuffer(dAtA []byte) (int, e } return len(dAtA) - i, nil } -func (m *CompileMutation_FileSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompileMutation_FileSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.FileSource != nil { - { - size, err := m.FileSource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *CompileMutation_DeleteFileSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompileMutation_DeleteFileSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeleteFileSource != nil { - { - size, err := m.DeleteFileSource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} func (m *CompileMutationsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2862,6 
+2849,10 @@ func (m *Configs) Size() (n int) { l = m.PluginConfig.Size() n += 1 + l + sovService(uint64(l)) } + if m.ClickhouseConfig != nil { + l = m.ClickhouseConfig.Size() + n += 1 + l + sovService(uint64(l)) + } return n } @@ -2907,6 +2898,38 @@ func (m *Configs_PluginConfig) Size() (n int) { return n } +func (m *Configs_ClickHouseConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Host) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovService(uint64(m.Port)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Database) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + func (m *QueryRequest) Size() (n int) { if m == nil { return 0 @@ -3012,19 +3035,6 @@ func (m *ConfigUpdate) Size() (n int) { return n } -func (m *DeleteFileSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.GlobPattern) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - func (m *CompileMutation) Size() (n int) { if m == nil { return 0 @@ -3073,30 +3083,6 @@ func (m *CompileMutation_ConfigUpdate) Size() (n int) { } return n } -func (m *CompileMutation_FileSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FileSource != nil { - l = m.FileSource.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} -func (m *CompileMutation_DeleteFileSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.DeleteFileSource != nil { - l = m.DeleteFileSource.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} func (m *CompileMutationsResponse) Size() (n int) { if m == nil { return 0 @@ -3191,6 +3177,7 @@ func (this *Configs) String() string { s := strings.Join([]string{`&Configs{`, `OTelEndpointConfig:` + 
strings.Replace(fmt.Sprintf("%v", this.OTelEndpointConfig), "Configs_OTelEndpointConfig", "Configs_OTelEndpointConfig", 1) + `,`, `PluginConfig:` + strings.Replace(fmt.Sprintf("%v", this.PluginConfig), "Configs_PluginConfig", "Configs_PluginConfig", 1) + `,`, + `ClickhouseConfig:` + strings.Replace(fmt.Sprintf("%v", this.ClickhouseConfig), "Configs_ClickHouseConfig", "Configs_ClickHouseConfig", 1) + `,`, `}`, }, "") return s @@ -3229,6 +3216,21 @@ func (this *Configs_PluginConfig) String() string { }, "") return s } +func (this *Configs_ClickHouseConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Configs_ClickHouseConfig{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `Database:` + fmt.Sprintf("%v", this.Database) + `,`, + `}`, + }, "") + return s +} func (this *QueryRequest) String() string { if this == nil { return "nil" @@ -3298,16 +3300,6 @@ func (this *ConfigUpdate) String() string { }, "") return s } -func (this *DeleteFileSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteFileSource{`, - `GlobPattern:` + fmt.Sprintf("%v", this.GlobPattern) + `,`, - `}`, - }, "") - return s -} func (this *CompileMutation) String() string { if this == nil { return "nil" @@ -3348,26 +3340,6 @@ func (this *CompileMutation_ConfigUpdate) String() string { }, "") return s } -func (this *CompileMutation_FileSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CompileMutation_FileSource{`, - `FileSource:` + strings.Replace(fmt.Sprintf("%v", this.FileSource), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CompileMutation_DeleteFileSource) String() string { - if this == nil { - 
return "nil" - } - s := strings.Join([]string{`&CompileMutation_DeleteFileSource{`, - `DeleteFileSource:` + strings.Replace(fmt.Sprintf("%v", this.DeleteFileSource), "DeleteFileSource", "DeleteFileSource", 1) + `,`, - `}`, - }, "") - return s -} func (this *CompileMutationsResponse) String() string { if this == nil { return "nil" @@ -3777,6 +3749,42 @@ func (m *Configs) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClickhouseConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClickhouseConfig == nil { + m.ClickhouseConfig = &Configs_ClickHouseConfig{} + } + if err := m.ClickhouseConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -4134,7 +4142,7 @@ func (m *Configs_PluginConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *QueryRequest) Unmarshal(dAtA []byte) error { +func (m *Configs_ClickHouseConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4157,15 +4165,15 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ClickHouseConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
ClickHouseConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueryStr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4193,13 +4201,13 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.QueryStr = string(dAtA[iNdEx:postIndex]) + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecFuncs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -4209,30 +4217,259 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExecFuncs = append(m.ExecFuncs, &FuncToExecute{}) - if err := m.ExecFuncs[len(m.ExecFuncs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) - } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port 
|= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Database = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryStr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueryStr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecFuncs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecFuncs = append(m.ExecFuncs, &FuncToExecute{}) + if err := m.ExecFuncs[len(m.ExecFuncs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -4860,88 +5097,6 @@ func (m *ConfigUpdate) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeleteFileSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteFileSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteFileSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobPattern", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GlobPattern = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *CompileMutation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5076,76 +5231,6 @@ func (m *CompileMutation) Unmarshal(dAtA []byte) error { } m.Mutation = &CompileMutation_ConfigUpdate{v} iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ir.FileSourceDeployment{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Mutation = &CompileMutation_FileSource{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteFileSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteFileSource{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Mutation = &CompileMutation_DeleteFileSource{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) diff --git a/src/carnot/planner/plannerpb/service.proto b/src/carnot/planner/plannerpb/service.proto index f2a17d3a0a2..4c3fd9a99a8 100644 --- a/src/carnot/planner/plannerpb/service.proto +++ b/src/carnot/planner/plannerpb/service.proto @@ -23,7 +23,6 @@ package px.carnot.planner.plannerpb; option go_package = "plannerpb"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; -import "src/carnot/planner/file_source/ir/logical.proto"; import "src/common/base/statuspb/status.proto"; import "gogoproto/gogo.proto"; import "src/carnot/planner/distributedpb/distributed_plan.proto"; @@ -146,11 +145,6 @@ message ConfigUpdate { string agent_pod_name = 3; } -message DeleteFileSource { - // The glob pattern to use to find files to read. Also doubles as the name of the file source. - string glob_pattern = 1; -} - // The definition of a mutation to perfom on Vizier. Mutations include operations // that add and delete tables to the database. message CompileMutation { @@ -162,10 +156,6 @@ message CompileMutation { DeleteTracepoint delete_tracepoint = 3; // Mutation that sets a config. 
ConfigUpdate config_update = 4; - // Mutation that adds a file source/poller - carnot.planner.file_source.ir.FileSourceDeployment file_source = 5; - // Mutation that deletes a file source/poller - DeleteFileSource delete_file_source = 6; } } diff --git a/src/carnot/planner/probes/BUILD.bazel b/src/carnot/planner/probes/BUILD.bazel index bd98b0fb8d6..f9fee130715 100644 --- a/src/carnot/planner/probes/BUILD.bazel +++ b/src/carnot/planner/probes/BUILD.bazel @@ -37,7 +37,6 @@ pl_cc_library( hdrs = ["probes.h"], deps = [ "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", - "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/carnot/planner/objects:cc_library", "//src/common/uuid:cc_library", ], diff --git a/src/carnot/planner/probes/probes.cc b/src/carnot/planner/probes/probes.cc index 942abab414c..fbe21a674d5 100644 --- a/src/carnot/planner/probes/probes.cc +++ b/src/carnot/planner/probes/probes.cc @@ -108,14 +108,6 @@ std::vector MutationsIR::Deployments() { return deployments; } -std::vector MutationsIR::FileSourceDeployments() { - std::vector file_source_deployments; - for (size_t i = 0; i < file_source_deployments_.size(); i++) { - file_source_deployments.push_back(file_source_deployments_[i]); - } - return file_source_deployments; -} - std::shared_ptr MutationsIR::StartProbe(const std::string& function_name) { auto tracepoint_ir = std::make_shared(function_name); probes_pool_.push_back(tracepoint_ir); @@ -300,35 +292,15 @@ Status MutationsIR::ToProto(plannerpb::CompileMutationsResponse* pb) { pb->add_mutations()->mutable_delete_tracepoint()->set_name(tracepoint_to_delete); } - for (const auto& file_source_to_delete : FileSourcesToDelete()) { - pb->add_mutations()->mutable_delete_file_source()->set_glob_pattern(file_source_to_delete); - } - for (const auto& update : config_updates_) { *(pb->add_mutations()->mutable_config_update()) = update; } - for (const auto& file_source : file_source_deployments_) { - 
*(pb->add_mutations()->mutable_file_source()) = file_source; - } - return Status::OK(); } void MutationsIR::EndProbe() { current_tracepoint_ = nullptr; } -void MutationsIR::CreateFileSourceDeployment(const std::string& glob_pattern, - const std::string& table_name, int64_t ttl_ns) { - file_source::ir::FileSourceDeployment file_source; - file_source.set_name(glob_pattern); - file_source.set_glob_pattern(glob_pattern); - file_source.set_table_name(table_name); - auto one_sec = std::chrono::duration_cast(std::chrono::seconds(1)); - file_source.mutable_ttl()->set_seconds(ttl_ns / one_sec.count()); - file_source.mutable_ttl()->set_nanos(ttl_ns % one_sec.count()); - file_source_deployments_.push_back(file_source); -} - } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/probes/probes.h b/src/carnot/planner/probes/probes.h index 3d90992402b..3578cdb33d6 100644 --- a/src/carnot/planner/probes/probes.h +++ b/src/carnot/planner/probes/probes.h @@ -23,7 +23,6 @@ #include #include "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.pb.h" -#include "src/carnot/planner/file_source/ir/logical.pb.h" #include "src/carnot/planner/objects/funcobject.h" #include "src/carnot/planner/plannerpb/service.pb.h" #include "src/carnot/planner/probes/label_selector_target.h" @@ -167,20 +166,6 @@ class TracepointIR { std::shared_ptr output_ = nullptr; }; -class FileSourceDeployment { - public: - FileSourceDeployment(const std::string& glob_pattern, const std::string& table_name, - int64_t ttl_ns) - : glob_pattern_(glob_pattern), table_name_(table_name), ttl_ns_(ttl_ns) {} - - Status ToProto(file_source::ir::FileSourceDeployment pb) const; - - private: - std::string glob_pattern_; - std::string table_name_; - int64_t ttl_ns_; -}; - class TracepointDeployment { public: TracepointDeployment(const std::string& trace_name, int64_t ttl_ns) @@ -240,10 +225,6 @@ class MutationsIR { */ std::shared_ptr StartProbe(const std::string& function_name); - 
void CreateFileSourceDeployment(const std::string& glob_pattern, const std::string& table_name, - int64_t ttl_ns); - - void CreateDeleteFileSource(const std::string& glob_pattern); /** * @brief Create a TraceProgram for the MutationsIR w/ the specified UPID. * @@ -350,19 +331,6 @@ class MutationsIR { std::vector Deployments(); - std::vector FileSourceDeployments(); - - /** - * @brief Deletes the file source passed in. - * - * @param file_source_to_delete - */ - void DeleteFileSource(const std::string& file_source_to_delete) { - file_sources_to_delete_.push_back(file_source_to_delete); - } - - const std::vector& FileSourcesToDelete() { return file_sources_to_delete_; } - private: // All the new tracepoints added as part of this mutation. DeploymentSpecs are protobufs because // we only modify these upon inserting the new tracepoint, while the Tracepoint definition is @@ -380,9 +348,6 @@ class MutationsIR { // The updates to internal config that need to be done. std::vector config_updates_; - - std::vector file_source_deployments_; - std::vector file_sources_to_delete_; }; } // namespace compiler diff --git a/src/carnot/planner/probes/tracepoint_generator.cc b/src/carnot/planner/probes/tracepoint_generator.cc index 3dc23bd2c66..bd2f817b035 100644 --- a/src/carnot/planner/probes/tracepoint_generator.cc +++ b/src/carnot/planner/probes/tracepoint_generator.cc @@ -28,16 +28,14 @@ #include "src/carnot/planner/probes/probes.h" #include "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.pb.h" -#include "src/carnot/planner/file_source/ir/logical.pb.h" namespace px { namespace carnot { namespace planner { namespace compiler { -namespace { - -StatusOr CompileMutations(std::string_view query) { +StatusOr CompileTracepoint( + std::string_view query) { // Create a compiler state; it doesn't affect the tracepoint compilation. // TODO(oazizi): Try inserting nullptr for registry_info. 
px::carnot::planner::RegistryInfo registry_info; @@ -67,22 +65,10 @@ StatusOr CompileMutations(std::string_view if (pb.mutations_size() != 1) { return error::Internal("Unexpected number of mutations"); } - return pb; -} - -} // namespace -StatusOr CompileTracepoint( - std::string_view query) { - PX_ASSIGN_OR_RETURN(auto pb, CompileMutations(query)); return pb.mutations()[0].trace(); } -StatusOr CompileFileSource(std::string_view query) { - PX_ASSIGN_OR_RETURN(auto pb, CompileMutations(query)); - return pb.mutations()[0].file_source(); -} - } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/probes/tracepoint_generator.h b/src/carnot/planner/probes/tracepoint_generator.h index 0894d92bbec..7cc4a957515 100644 --- a/src/carnot/planner/probes/tracepoint_generator.h +++ b/src/carnot/planner/probes/tracepoint_generator.h @@ -33,12 +33,6 @@ namespace compiler { StatusOr CompileTracepoint( std::string_view query); -/** - * Take a file source specification in PXL format, and compiles it to a logical file source - * deployment. 
- */ -StatusOr CompileFileSource(std::string_view query); - } // namespace compiler } // namespace planner } // namespace carnot diff --git a/src/carnot/planner/test_utils.h b/src/carnot/planner/test_utils.h index 8ba8af3aabb..84a5f94c8fe 100644 --- a/src/carnot/planner/test_utils.h +++ b/src/carnot/planner/test_utils.h @@ -286,95 +286,6 @@ relation_map { } )proto"; -constexpr char kFileSourceSchema[] = R"proto( -relation_map { - key: "kern.log" - value { - columns { - column_name: "time_" - column_type: TIME64NS - column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_UPID - } - columns { - column_name: "service" - column_type: STRING - column_semantic_type: ST_NONE - } - columns { - column_name: "resp_latency_ns" - column_type: INT64 - column_semantic_type: ST_DURATION_NS - } - mutation_id: "mutation" - } -} -relation_map { - key: "cpu" - value { - columns { - column_name: "count" - column_type: INT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "cpu0" - column_type: FLOAT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "cpu1" - column_type: FLOAT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "cpu2" - column_type: FLOAT64 - column_semantic_type: ST_NONE - } - } -} -relation_map { - key: "process_stats" - value { - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_UPID - } - columns { - column_name: "cpu_ktime_ns" - column_type: INT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "cpu_utime_ns" - column_type: INT64 - column_semantic_type: ST_NONE - } - } -} -relation_map { - key: "only_pem1" - value { - columns { - column_name: "time_" - column_type: TIME64NS - column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_NONE - } - } -} -)proto"; - constexpr char kConnStatsSchema[] = R"proto( relation_map { key: "conn_stats" @@ -1233,229 +1144,6 
@@ schema_info { } )proto"; -constexpr char kThreePEMsOneKelvinAllHasDataStoreDistributedState[] = R"proto( -carnot_info { - query_broker_address: "pem1" - agent_id { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000001 - } - has_grpc_server: false - has_data_store: true - processes_data: true - accepts_remote_sources: false - asid: 123 - table_info { - table: "table" - } -} -carnot_info { - query_broker_address: "pem2" - agent_id { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000002 - } - has_grpc_server: false - has_data_store: true - processes_data: true - accepts_remote_sources: false - asid: 789 - table_info { - table: "table" - } -} -carnot_info { - query_broker_address: "pem3" - agent_id { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000003 - } - has_grpc_server: false - has_data_store: true - processes_data: true - accepts_remote_sources: false - asid: 111 - table_info { - table: "table" - } -} -carnot_info { - query_broker_address: "kelvin" - agent_id { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000004 - } - grpc_address: "1111" - has_grpc_server: true - has_data_store: true - processes_data: true - accepts_remote_sources: true - asid: 456 - ssl_targetname: "kelvin.pl.svc" -} -schema_info { - name: "table" - relation { - columns { - column_name: "time_" - column_type: TIME64NS - column_semantic_type: ST_NONE - } - columns { - column_name: "cpu_cycles" - column_type: INT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_NONE - } - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000001 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000002 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000003 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000004 - } -} -schema_info { - name: "cql_events" - relation { - columns { - column_name: "time_" - 
column_type: TIME64NS - column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_NONE - } - columns { - column_name: "remote_addr" - column_type: STRING - column_semantic_type: ST_NONE - } - columns { - column_name: "remote_port" - column_type: INT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "trace_role" - column_type: INT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "latency" - column_type: INT64 - column_semantic_type: ST_NONE - } - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000001 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000002 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000003 - } -} -schema_info { - name: "http_events" - relation { - columns { - column_name: "time_" - column_type: TIME64NS - column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_NONE - } - columns { - column_name: "local_addr" - column_type: STRING - column_semantic_type: ST_NONE - } - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000001 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000002 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000003 - } -} -schema_info { - name: "process_stats" - relation { - columns { - column_name: "time_" - column_type: TIME64NS - column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_NONE - } - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000001 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000002 - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000003 - } -} -schema_info { - name: "only_pem1" - relation { - columns { - column_name: "time_" - column_type: TIME64NS - 
column_semantic_type: ST_NONE - } - columns { - column_name: "upid" - column_type: UINT128 - column_semantic_type: ST_NONE - } - } - agent_list { - high_bits: 0x0000000100000000 - low_bits: 0x0000000000000001 - } -} -)proto"; - constexpr char kOnePEMOneKelvinDistributedState[] = R"proto( carnot_info { agent_id { diff --git a/src/carnot/planpb/plan.pb.go b/src/carnot/planpb/plan.pb.go index ef00e006fa3..bb5a6584aea 100755 --- a/src/carnot/planpb/plan.pb.go +++ b/src/carnot/planpb/plan.pb.go @@ -34,20 +34,22 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type OperatorType int32 const ( - OPERATOR_TYPE_UNKNOWN OperatorType = 0 - MEMORY_SOURCE_OPERATOR OperatorType = 1000 - GRPC_SOURCE_OPERATOR OperatorType = 1100 - UDTF_SOURCE_OPERATOR OperatorType = 1200 - EMPTY_SOURCE_OPERATOR OperatorType = 1300 - MAP_OPERATOR OperatorType = 2000 - AGGREGATE_OPERATOR OperatorType = 2100 - FILTER_OPERATOR OperatorType = 2200 - LIMIT_OPERATOR OperatorType = 2300 - UNION_OPERATOR OperatorType = 2400 - JOIN_OPERATOR OperatorType = 2500 - MEMORY_SINK_OPERATOR OperatorType = 9000 - GRPC_SINK_OPERATOR OperatorType = 9100 - OTEL_EXPORT_SINK_OPERATOR OperatorType = 9200 + OPERATOR_TYPE_UNKNOWN OperatorType = 0 + MEMORY_SOURCE_OPERATOR OperatorType = 1000 + GRPC_SOURCE_OPERATOR OperatorType = 1100 + UDTF_SOURCE_OPERATOR OperatorType = 1200 + EMPTY_SOURCE_OPERATOR OperatorType = 1300 + CLICKHOUSE_SOURCE_OPERATOR OperatorType = 1400 + MAP_OPERATOR OperatorType = 2000 + AGGREGATE_OPERATOR OperatorType = 2100 + FILTER_OPERATOR OperatorType = 2200 + LIMIT_OPERATOR OperatorType = 2300 + UNION_OPERATOR OperatorType = 2400 + JOIN_OPERATOR OperatorType = 2500 + MEMORY_SINK_OPERATOR OperatorType = 9000 + GRPC_SINK_OPERATOR OperatorType = 9100 + OTEL_EXPORT_SINK_OPERATOR OperatorType = 9200 + CLICKHOUSE_EXPORT_SINK_OPERATOR OperatorType = 9300 ) var OperatorType_name = map[int32]string{ @@ -56,6 +58,7 @@ var OperatorType_name = map[int32]string{ 1100: 
"GRPC_SOURCE_OPERATOR", 1200: "UDTF_SOURCE_OPERATOR", 1300: "EMPTY_SOURCE_OPERATOR", + 1400: "CLICKHOUSE_SOURCE_OPERATOR", 2000: "MAP_OPERATOR", 2100: "AGGREGATE_OPERATOR", 2200: "FILTER_OPERATOR", @@ -65,23 +68,26 @@ var OperatorType_name = map[int32]string{ 9000: "MEMORY_SINK_OPERATOR", 9100: "GRPC_SINK_OPERATOR", 9200: "OTEL_EXPORT_SINK_OPERATOR", + 9300: "CLICKHOUSE_EXPORT_SINK_OPERATOR", } var OperatorType_value = map[string]int32{ - "OPERATOR_TYPE_UNKNOWN": 0, - "MEMORY_SOURCE_OPERATOR": 1000, - "GRPC_SOURCE_OPERATOR": 1100, - "UDTF_SOURCE_OPERATOR": 1200, - "EMPTY_SOURCE_OPERATOR": 1300, - "MAP_OPERATOR": 2000, - "AGGREGATE_OPERATOR": 2100, - "FILTER_OPERATOR": 2200, - "LIMIT_OPERATOR": 2300, - "UNION_OPERATOR": 2400, - "JOIN_OPERATOR": 2500, - "MEMORY_SINK_OPERATOR": 9000, - "GRPC_SINK_OPERATOR": 9100, - "OTEL_EXPORT_SINK_OPERATOR": 9200, + "OPERATOR_TYPE_UNKNOWN": 0, + "MEMORY_SOURCE_OPERATOR": 1000, + "GRPC_SOURCE_OPERATOR": 1100, + "UDTF_SOURCE_OPERATOR": 1200, + "EMPTY_SOURCE_OPERATOR": 1300, + "CLICKHOUSE_SOURCE_OPERATOR": 1400, + "MAP_OPERATOR": 2000, + "AGGREGATE_OPERATOR": 2100, + "FILTER_OPERATOR": 2200, + "LIMIT_OPERATOR": 2300, + "UNION_OPERATOR": 2400, + "JOIN_OPERATOR": 2500, + "MEMORY_SINK_OPERATOR": 9000, + "GRPC_SINK_OPERATOR": 9100, + "OTEL_EXPORT_SINK_OPERATOR": 9200, + "CLICKHOUSE_EXPORT_SINK_OPERATOR": 9300, } func (OperatorType) EnumDescriptor() ([]byte, []int) { @@ -526,8 +532,9 @@ type Operator struct { // *Operator_UdtfSourceOp // *Operator_EmptySourceOp // *Operator_OTelSinkOp - Op isOperator_Op `protobuf_oneof:"op"` - Context map[string]string `protobuf:"bytes,15,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // *Operator_ClickhouseSourceOp + // *Operator_ClickhouseSinkOp + Op isOperator_Op `protobuf_oneof:"op"` } func (m *Operator) Reset() { *m = Operator{} } @@ -608,20 +615,28 @@ type Operator_EmptySourceOp struct { type 
Operator_OTelSinkOp struct { OTelSinkOp *OTelExportSinkOperator `protobuf:"bytes,14,opt,name=otel_sink_op,json=otelSinkOp,proto3,oneof" json:"otel_sink_op,omitempty"` } - -func (*Operator_MemSourceOp) isOperator_Op() {} -func (*Operator_MapOp) isOperator_Op() {} -func (*Operator_AggOp) isOperator_Op() {} -func (*Operator_MemSinkOp) isOperator_Op() {} -func (*Operator_FilterOp) isOperator_Op() {} -func (*Operator_LimitOp) isOperator_Op() {} -func (*Operator_UnionOp) isOperator_Op() {} -func (*Operator_GRPCSourceOp) isOperator_Op() {} -func (*Operator_GRPCSinkOp) isOperator_Op() {} -func (*Operator_JoinOp) isOperator_Op() {} -func (*Operator_UdtfSourceOp) isOperator_Op() {} -func (*Operator_EmptySourceOp) isOperator_Op() {} -func (*Operator_OTelSinkOp) isOperator_Op() {} +type Operator_ClickhouseSourceOp struct { + ClickhouseSourceOp *ClickHouseSourceOperator `protobuf:"bytes,15,opt,name=clickhouse_source_op,json=clickhouseSourceOp,proto3,oneof" json:"clickhouse_source_op,omitempty"` +} +type Operator_ClickhouseSinkOp struct { + ClickhouseSinkOp *ClickHouseExportSinkOperator `protobuf:"bytes,16,opt,name=clickhouse_sink_op,json=clickhouseSinkOp,proto3,oneof" json:"clickhouse_sink_op,omitempty"` +} + +func (*Operator_MemSourceOp) isOperator_Op() {} +func (*Operator_MapOp) isOperator_Op() {} +func (*Operator_AggOp) isOperator_Op() {} +func (*Operator_MemSinkOp) isOperator_Op() {} +func (*Operator_FilterOp) isOperator_Op() {} +func (*Operator_LimitOp) isOperator_Op() {} +func (*Operator_UnionOp) isOperator_Op() {} +func (*Operator_GRPCSourceOp) isOperator_Op() {} +func (*Operator_GRPCSinkOp) isOperator_Op() {} +func (*Operator_JoinOp) isOperator_Op() {} +func (*Operator_UdtfSourceOp) isOperator_Op() {} +func (*Operator_EmptySourceOp) isOperator_Op() {} +func (*Operator_OTelSinkOp) isOperator_Op() {} +func (*Operator_ClickhouseSourceOp) isOperator_Op() {} +func (*Operator_ClickhouseSinkOp) isOperator_Op() {} func (m *Operator) GetOp() isOperator_Op { if m != nil { @@ 
-728,9 +743,16 @@ func (m *Operator) GetOTelSinkOp() *OTelExportSinkOperator { return nil } -func (m *Operator) GetContext() map[string]string { - if m != nil { - return m.Context +func (m *Operator) GetClickhouseSourceOp() *ClickHouseSourceOperator { + if x, ok := m.GetOp().(*Operator_ClickhouseSourceOp); ok { + return x.ClickhouseSourceOp + } + return nil +} + +func (m *Operator) GetClickhouseSinkOp() *ClickHouseExportSinkOperator { + if x, ok := m.GetOp().(*Operator_ClickhouseSinkOp); ok { + return x.ClickhouseSinkOp } return nil } @@ -751,6 +773,8 @@ func (*Operator) XXX_OneofWrappers() []interface{} { (*Operator_UdtfSourceOp)(nil), (*Operator_EmptySourceOp)(nil), (*Operator_OTelSinkOp)(nil), + (*Operator_ClickhouseSourceOp)(nil), + (*Operator_ClickhouseSinkOp)(nil), } } @@ -1818,6 +1842,153 @@ func (m *EmptySourceOperator) GetColumnTypes() []typespb.DataType { return nil } +type ClickHouseSourceOperator struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + Query string `protobuf:"bytes,6,opt,name=query,proto3" json:"query,omitempty"` + ColumnNames []string `protobuf:"bytes,7,rep,name=column_names,json=columnNames,proto3" json:"column_names,omitempty"` + ColumnTypes []typespb.DataType `protobuf:"varint,8,rep,packed,name=column_types,json=columnTypes,proto3,enum=px.types.DataType" json:"column_types,omitempty"` + BatchSize int32 `protobuf:"varint,9,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` + Streaming bool `protobuf:"varint,10,opt,name=streaming,proto3" json:"streaming,omitempty"` + TimestampColumn string 
`protobuf:"bytes,11,opt,name=timestamp_column,json=timestampColumn,proto3" json:"timestamp_column,omitempty"` + PartitionColumn string `protobuf:"bytes,12,opt,name=partition_column,json=partitionColumn,proto3" json:"partition_column,omitempty"` + StartTime int64 `protobuf:"varint,13,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,14,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` +} + +func (m *ClickHouseSourceOperator) Reset() { *m = ClickHouseSourceOperator{} } +func (*ClickHouseSourceOperator) ProtoMessage() {} +func (*ClickHouseSourceOperator) Descriptor() ([]byte, []int) { + return fileDescriptor_e5dcfc8666ec3f33, []int{18} +} +func (m *ClickHouseSourceOperator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClickHouseSourceOperator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClickHouseSourceOperator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClickHouseSourceOperator) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickHouseSourceOperator.Merge(m, src) +} +func (m *ClickHouseSourceOperator) XXX_Size() int { + return m.Size() +} +func (m *ClickHouseSourceOperator) XXX_DiscardUnknown() { + xxx_messageInfo_ClickHouseSourceOperator.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickHouseSourceOperator proto.InternalMessageInfo + +func (m *ClickHouseSourceOperator) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ClickHouseSourceOperator) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickHouseSourceOperator) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *ClickHouseSourceOperator) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m 
*ClickHouseSourceOperator) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + +func (m *ClickHouseSourceOperator) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *ClickHouseSourceOperator) GetColumnNames() []string { + if m != nil { + return m.ColumnNames + } + return nil +} + +func (m *ClickHouseSourceOperator) GetColumnTypes() []typespb.DataType { + if m != nil { + return m.ColumnTypes + } + return nil +} + +func (m *ClickHouseSourceOperator) GetBatchSize() int32 { + if m != nil { + return m.BatchSize + } + return 0 +} + +func (m *ClickHouseSourceOperator) GetStreaming() bool { + if m != nil { + return m.Streaming + } + return false +} + +func (m *ClickHouseSourceOperator) GetTimestampColumn() string { + if m != nil { + return m.TimestampColumn + } + return "" +} + +func (m *ClickHouseSourceOperator) GetPartitionColumn() string { + if m != nil { + return m.PartitionColumn + } + return "" +} + +func (m *ClickHouseSourceOperator) GetStartTime() int64 { + if m != nil { + return m.StartTime + } + return 0 +} + +func (m *ClickHouseSourceOperator) GetEndTime() int64 { + if m != nil { + return m.EndTime + } + return 0 +} + type OTelLog struct { Attributes []*OTelAttribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` TimeColumnIndex int64 `protobuf:"varint,2,opt,name=time_column_index,json=timeColumnIndex,proto3" json:"time_column_index,omitempty"` @@ -1830,7 +2001,7 @@ type OTelLog struct { func (m *OTelLog) Reset() { *m = OTelLog{} } func (*OTelLog) ProtoMessage() {} func (*OTelLog) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{18} + return fileDescriptor_e5dcfc8666ec3f33, []int{19} } func (m *OTelLog) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1919,7 +2090,7 @@ type OTelSpan struct { func (m *OTelSpan) Reset() { *m = OTelSpan{} } func (*OTelSpan) ProtoMessage() {} func (*OTelSpan) Descriptor() ([]byte, []int) { - return 
fileDescriptor_e5dcfc8666ec3f33, []int{19} + return fileDescriptor_e5dcfc8666ec3f33, []int{20} } func (m *OTelSpan) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2054,7 +2225,7 @@ type OTelMetricGauge struct { func (m *OTelMetricGauge) Reset() { *m = OTelMetricGauge{} } func (*OTelMetricGauge) ProtoMessage() {} func (*OTelMetricGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{20} + return fileDescriptor_e5dcfc8666ec3f33, []int{21} } func (m *OTelMetricGauge) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2138,7 +2309,7 @@ type OTelMetricSummary struct { func (m *OTelMetricSummary) Reset() { *m = OTelMetricSummary{} } func (*OTelMetricSummary) ProtoMessage() {} func (*OTelMetricSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{21} + return fileDescriptor_e5dcfc8666ec3f33, []int{22} } func (m *OTelMetricSummary) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2196,7 +2367,7 @@ type OTelMetricSummary_ValueAtQuantile struct { func (m *OTelMetricSummary_ValueAtQuantile) Reset() { *m = OTelMetricSummary_ValueAtQuantile{} } func (*OTelMetricSummary_ValueAtQuantile) ProtoMessage() {} func (*OTelMetricSummary_ValueAtQuantile) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{21, 0} + return fileDescriptor_e5dcfc8666ec3f33, []int{22, 0} } func (m *OTelMetricSummary_ValueAtQuantile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2251,7 +2422,7 @@ type OTelAttribute struct { func (m *OTelAttribute) Reset() { *m = OTelAttribute{} } func (*OTelAttribute) ProtoMessage() {} func (*OTelAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{22} + return fileDescriptor_e5dcfc8666ec3f33, []int{23} } func (m *OTelAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2342,7 +2513,7 @@ type OTelAttribute_Column struct { func (m *OTelAttribute_Column) Reset() { *m = OTelAttribute_Column{} } 
func (*OTelAttribute_Column) ProtoMessage() {} func (*OTelAttribute_Column) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{22, 0} + return fileDescriptor_e5dcfc8666ec3f33, []int{23, 0} } func (m *OTelAttribute_Column) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2408,7 +2579,7 @@ type OTelMetric struct { func (m *OTelMetric) Reset() { *m = OTelMetric{} } func (*OTelMetric) ProtoMessage() {} func (*OTelMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{23} + return fileDescriptor_e5dcfc8666ec3f33, []int{24} } func (m *OTelMetric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2528,7 +2699,7 @@ type OTelEndpointConfig struct { func (m *OTelEndpointConfig) Reset() { *m = OTelEndpointConfig{} } func (*OTelEndpointConfig) ProtoMessage() {} func (*OTelEndpointConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{24} + return fileDescriptor_e5dcfc8666ec3f33, []int{25} } func (m *OTelEndpointConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2585,6 +2756,89 @@ func (m *OTelEndpointConfig) GetTimeout() int64 { return 0 } +type ClickHouseConfig struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,5,opt,name=password,proto3" json:"password,omitempty"` + Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"` +} + +func (m *ClickHouseConfig) Reset() { *m = ClickHouseConfig{} } +func (*ClickHouseConfig) ProtoMessage() {} +func (*ClickHouseConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_e5dcfc8666ec3f33, []int{26} +} +func (m *ClickHouseConfig) XXX_Unmarshal(b []byte) error 
{ + return m.Unmarshal(b) +} +func (m *ClickHouseConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClickHouseConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClickHouseConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickHouseConfig.Merge(m, src) +} +func (m *ClickHouseConfig) XXX_Size() int { + return m.Size() +} +func (m *ClickHouseConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClickHouseConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickHouseConfig proto.InternalMessageInfo + +func (m *ClickHouseConfig) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *ClickHouseConfig) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ClickHouseConfig) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *ClickHouseConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *ClickHouseConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *ClickHouseConfig) GetDatabase() string { + if m != nil { + return m.Database + } + return "" +} + type OTelResource struct { Attributes []*OTelAttribute `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` } @@ -2592,7 +2846,7 @@ type OTelResource struct { func (m *OTelResource) Reset() { *m = OTelResource{} } func (*OTelResource) ProtoMessage() {} func (*OTelResource) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{25} + return fileDescriptor_e5dcfc8666ec3f33, []int{27} } func (m *OTelResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2639,7 +2893,7 @@ type OTelExportSinkOperator struct { func (m *OTelExportSinkOperator) Reset() { *m = OTelExportSinkOperator{} } func (*OTelExportSinkOperator) 
ProtoMessage() {} func (*OTelExportSinkOperator) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{26} + return fileDescriptor_e5dcfc8666ec3f33, []int{28} } func (m *OTelExportSinkOperator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2703,6 +2957,126 @@ func (m *OTelExportSinkOperator) GetLogs() []*OTelLog { return nil } +type ClickHouseExportSinkOperator struct { + ClickhouseConfig *ClickHouseConfig `protobuf:"bytes,1,opt,name=clickhouse_config,json=clickhouseConfig,proto3" json:"clickhouse_config,omitempty"` + TableName string `protobuf:"bytes,2,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + ColumnMappings []*ClickHouseExportSinkOperator_ColumnMapping `protobuf:"bytes,3,rep,name=column_mappings,json=columnMappings,proto3" json:"column_mappings,omitempty"` +} + +func (m *ClickHouseExportSinkOperator) Reset() { *m = ClickHouseExportSinkOperator{} } +func (*ClickHouseExportSinkOperator) ProtoMessage() {} +func (*ClickHouseExportSinkOperator) Descriptor() ([]byte, []int) { + return fileDescriptor_e5dcfc8666ec3f33, []int{29} +} +func (m *ClickHouseExportSinkOperator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClickHouseExportSinkOperator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClickHouseExportSinkOperator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClickHouseExportSinkOperator) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickHouseExportSinkOperator.Merge(m, src) +} +func (m *ClickHouseExportSinkOperator) XXX_Size() int { + return m.Size() +} +func (m *ClickHouseExportSinkOperator) XXX_DiscardUnknown() { + xxx_messageInfo_ClickHouseExportSinkOperator.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickHouseExportSinkOperator proto.InternalMessageInfo + +func (m 
*ClickHouseExportSinkOperator) GetClickhouseConfig() *ClickHouseConfig { + if m != nil { + return m.ClickhouseConfig + } + return nil +} + +func (m *ClickHouseExportSinkOperator) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *ClickHouseExportSinkOperator) GetColumnMappings() []*ClickHouseExportSinkOperator_ColumnMapping { + if m != nil { + return m.ColumnMappings + } + return nil +} + +type ClickHouseExportSinkOperator_ColumnMapping struct { + InputColumnIndex int32 `protobuf:"varint,1,opt,name=input_column_index,json=inputColumnIndex,proto3" json:"input_column_index,omitempty"` + ClickhouseColumnName string `protobuf:"bytes,2,opt,name=clickhouse_column_name,json=clickhouseColumnName,proto3" json:"clickhouse_column_name,omitempty"` + ColumnType typespb.DataType `protobuf:"varint,3,opt,name=column_type,json=columnType,proto3,enum=px.types.DataType" json:"column_type,omitempty"` +} + +func (m *ClickHouseExportSinkOperator_ColumnMapping) Reset() { + *m = ClickHouseExportSinkOperator_ColumnMapping{} +} +func (*ClickHouseExportSinkOperator_ColumnMapping) ProtoMessage() {} +func (*ClickHouseExportSinkOperator_ColumnMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_e5dcfc8666ec3f33, []int{29, 0} +} +func (m *ClickHouseExportSinkOperator_ColumnMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClickHouseExportSinkOperator_ColumnMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClickHouseExportSinkOperator_ColumnMapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClickHouseExportSinkOperator_ColumnMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClickHouseExportSinkOperator_ColumnMapping.Merge(m, src) +} +func (m *ClickHouseExportSinkOperator_ColumnMapping) XXX_Size() int { + return m.Size() 
+} +func (m *ClickHouseExportSinkOperator_ColumnMapping) XXX_DiscardUnknown() { + xxx_messageInfo_ClickHouseExportSinkOperator_ColumnMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_ClickHouseExportSinkOperator_ColumnMapping proto.InternalMessageInfo + +func (m *ClickHouseExportSinkOperator_ColumnMapping) GetInputColumnIndex() int32 { + if m != nil { + return m.InputColumnIndex + } + return 0 +} + +func (m *ClickHouseExportSinkOperator_ColumnMapping) GetClickhouseColumnName() string { + if m != nil { + return m.ClickhouseColumnName + } + return "" +} + +func (m *ClickHouseExportSinkOperator_ColumnMapping) GetColumnType() typespb.DataType { + if m != nil { + return m.ColumnType + } + return typespb.DATA_TYPE_UNKNOWN +} + type ScalarExpression struct { // Types that are valid to be assigned to Value: // @@ -2715,7 +3089,7 @@ type ScalarExpression struct { func (m *ScalarExpression) Reset() { *m = ScalarExpression{} } func (*ScalarExpression) ProtoMessage() {} func (*ScalarExpression) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{27} + return fileDescriptor_e5dcfc8666ec3f33, []int{30} } func (m *ScalarExpression) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2818,7 +3192,7 @@ type ScalarValue struct { func (m *ScalarValue) Reset() { *m = ScalarValue{} } func (*ScalarValue) ProtoMessage() {} func (*ScalarValue) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{28} + return fileDescriptor_e5dcfc8666ec3f33, []int{31} } func (m *ScalarValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2959,7 +3333,7 @@ type ScalarFunc struct { func (m *ScalarFunc) Reset() { *m = ScalarFunc{} } func (*ScalarFunc) ProtoMessage() {} func (*ScalarFunc) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{29} + return fileDescriptor_e5dcfc8666ec3f33, []int{32} } func (m *ScalarFunc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3034,7 +3408,7 @@ type 
AggregateExpression struct { func (m *AggregateExpression) Reset() { *m = AggregateExpression{} } func (*AggregateExpression) ProtoMessage() {} func (*AggregateExpression) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{30} + return fileDescriptor_e5dcfc8666ec3f33, []int{33} } func (m *AggregateExpression) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3109,7 +3483,7 @@ type AggregateExpression_Arg struct { func (m *AggregateExpression_Arg) Reset() { *m = AggregateExpression_Arg{} } func (*AggregateExpression_Arg) ProtoMessage() {} func (*AggregateExpression_Arg) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{30, 0} + return fileDescriptor_e5dcfc8666ec3f33, []int{33, 0} } func (m *AggregateExpression_Arg) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3192,7 +3566,7 @@ type Column struct { func (m *Column) Reset() { *m = Column{} } func (*Column) ProtoMessage() {} func (*Column) Descriptor() ([]byte, []int) { - return fileDescriptor_e5dcfc8666ec3f33, []int{31} + return fileDescriptor_e5dcfc8666ec3f33, []int{34} } func (m *Column) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3246,7 +3620,6 @@ func init() { proto.RegisterType((*DAG_DAGNode)(nil), "px.carnot.planpb.DAG.DAGNode") proto.RegisterType((*PlanNode)(nil), "px.carnot.planpb.PlanNode") proto.RegisterType((*Operator)(nil), "px.carnot.planpb.Operator") - proto.RegisterMapType((map[string]string)(nil), "px.carnot.planpb.Operator.ContextEntry") proto.RegisterType((*MemorySourceOperator)(nil), "px.carnot.planpb.MemorySourceOperator") proto.RegisterType((*MemorySinkOperator)(nil), "px.carnot.planpb.MemorySinkOperator") proto.RegisterType((*GRPCSourceOperator)(nil), "px.carnot.planpb.GRPCSourceOperator") @@ -3264,6 +3637,7 @@ func init() { proto.RegisterType((*JoinOperator_ParentColumn)(nil), "px.carnot.planpb.JoinOperator.ParentColumn") proto.RegisterType((*UDTFSourceOperator)(nil), 
"px.carnot.planpb.UDTFSourceOperator") proto.RegisterType((*EmptySourceOperator)(nil), "px.carnot.planpb.EmptySourceOperator") + proto.RegisterType((*ClickHouseSourceOperator)(nil), "px.carnot.planpb.ClickHouseSourceOperator") proto.RegisterType((*OTelLog)(nil), "px.carnot.planpb.OTelLog") proto.RegisterType((*OTelSpan)(nil), "px.carnot.planpb.OTelSpan") proto.RegisterType((*OTelMetricGauge)(nil), "px.carnot.planpb.OTelMetricGauge") @@ -3274,8 +3648,11 @@ func init() { proto.RegisterType((*OTelMetric)(nil), "px.carnot.planpb.OTelMetric") proto.RegisterType((*OTelEndpointConfig)(nil), "px.carnot.planpb.OTelEndpointConfig") proto.RegisterMapType((map[string]string)(nil), "px.carnot.planpb.OTelEndpointConfig.HeadersEntry") + proto.RegisterType((*ClickHouseConfig)(nil), "px.carnot.planpb.ClickHouseConfig") proto.RegisterType((*OTelResource)(nil), "px.carnot.planpb.OTelResource") proto.RegisterType((*OTelExportSinkOperator)(nil), "px.carnot.planpb.OTelExportSinkOperator") + proto.RegisterType((*ClickHouseExportSinkOperator)(nil), "px.carnot.planpb.ClickHouseExportSinkOperator") + proto.RegisterType((*ClickHouseExportSinkOperator_ColumnMapping)(nil), "px.carnot.planpb.ClickHouseExportSinkOperator.ColumnMapping") proto.RegisterType((*ScalarExpression)(nil), "px.carnot.planpb.ScalarExpression") proto.RegisterType((*ScalarValue)(nil), "px.carnot.planpb.ScalarValue") proto.RegisterType((*ScalarFunc)(nil), "px.carnot.planpb.ScalarFunc") @@ -3287,216 +3664,238 @@ func init() { func init() { proto.RegisterFile("src/carnot/planpb/plan.proto", fileDescriptor_e5dcfc8666ec3f33) } var fileDescriptor_e5dcfc8666ec3f33 = []byte{ - // 3333 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4d, 0x6c, 0x1b, 0x47, - 0x96, 0x66, 0x93, 0x14, 0x7f, 0x1e, 0x7f, 0x55, 0x96, 0x6c, 0x99, 0xb6, 0x29, 0x87, 0xb1, 0xd7, - 0x8a, 0x37, 0xa1, 0x6c, 0xd9, 0xf1, 0x3a, 0x8e, 0xb3, 0x09, 0x25, 0x51, 0x12, 0x15, 0x49, 0xd4, - 0x96, 0xa8, 0x64, 
0xb3, 0x1b, 0x6c, 0xa3, 0xc5, 0x2e, 0xb5, 0x3b, 0x26, 0xbb, 0x3b, 0xfd, 0x63, - 0x4b, 0x01, 0x16, 0x9b, 0xdd, 0xd3, 0x1e, 0x72, 0xd8, 0xc3, 0x1e, 0x16, 0x7b, 0xdf, 0x45, 0x2e, - 0x33, 0xc8, 0x61, 0x8e, 0x73, 0x98, 0x01, 0x06, 0xc8, 0x1c, 0x06, 0x81, 0x67, 0x4e, 0x39, 0x19, - 0xb1, 0x72, 0xf1, 0x61, 0x30, 0xc8, 0xdc, 0xe7, 0x30, 0xa8, 0x9f, 0x26, 0x9b, 0xea, 0xa6, 0xa5, - 0x64, 0x06, 0x03, 0xcc, 0xc1, 0x16, 0xeb, 0xd5, 0xf7, 0x5e, 0xbd, 0xbf, 0x7a, 0xf5, 0xaa, 0x1a, - 0x2e, 0x3a, 0x76, 0x77, 0xbe, 0xab, 0xd8, 0x86, 0xe9, 0xce, 0x5b, 0x3d, 0xc5, 0xb0, 0xf6, 0xd8, - 0x9f, 0xba, 0x65, 0x9b, 0xae, 0x89, 0xca, 0xd6, 0x41, 0x9d, 0x4f, 0xd6, 0xf9, 0x64, 0x65, 0x4a, - 0x33, 0x35, 0x93, 0x4d, 0xce, 0xd3, 0x5f, 0x1c, 0x57, 0xa9, 0x6a, 0xa6, 0xa9, 0xf5, 0xc8, 0x3c, - 0x1b, 0xed, 0x79, 0xfb, 0xf3, 0x8f, 0x6d, 0xc5, 0xb2, 0x88, 0xed, 0x88, 0xf9, 0x59, 0xba, 0x8a, - 0x62, 0xe9, 0x1c, 0x30, 0xef, 0x79, 0xba, 0x6a, 0xed, 0xb1, 0x3f, 0x02, 0x70, 0x85, 0x02, 0x9c, - 0x07, 0x8a, 0x4d, 0xd4, 0x79, 0xf7, 0xd0, 0x22, 0x0e, 0xff, 0xdf, 0xda, 0xe3, 0x7f, 0x39, 0xaa, - 0xf6, 0xef, 0x12, 0xe4, 0xb6, 0x7b, 0x8a, 0xd1, 0xb6, 0x5c, 0xdd, 0x34, 0x1c, 0x34, 0x03, 0x69, - 0x72, 0x60, 0xf5, 0x14, 0xdd, 0x98, 0x89, 0x5f, 0x96, 0xe6, 0x32, 0xd8, 0x1f, 0xd2, 0x19, 0xc5, - 0x50, 0x7a, 0x87, 0x9f, 0x90, 0x99, 0x04, 0x9f, 0x11, 0x43, 0x74, 0x17, 0xce, 0xf7, 0x95, 0x03, - 0xd9, 0xf4, 0x5c, 0xcb, 0x73, 0x65, 0xdb, 0x7c, 0xec, 0xc8, 0x16, 0xb1, 0x65, 0x57, 0xd9, 0xeb, - 0x91, 0x99, 0xe4, 0x65, 0x69, 0x2e, 0x81, 0xa7, 0xfb, 0xca, 0x41, 0x9b, 0xcd, 0x63, 0xf3, 0xb1, - 0xb3, 0x4d, 0xec, 0x0e, 0x9d, 0x5c, 0x4f, 0x66, 0xa4, 0x72, 0xbc, 0xf6, 0x2c, 0x01, 0x49, 0xaa, - 0x03, 0xba, 0x06, 0x09, 0x55, 0xd1, 0x66, 0xa4, 0xcb, 0xd2, 0x5c, 0x6e, 0x61, 0xba, 0x7e, 0xdc, - 0x53, 0xf5, 0xe5, 0xc6, 0x2a, 0xa6, 0x08, 0x74, 0x1b, 0x26, 0x0c, 0x53, 0x25, 0xce, 0x4c, 0xfc, - 0x72, 0x62, 0x2e, 0xb7, 0x50, 0x0d, 0x43, 0xa9, 0xbc, 0x15, 0x5b, 0xd1, 0xfa, 0xc4, 0x70, 0x31, - 0x07, 0xa3, 0x77, 0x20, 0x4f, 0x67, 0x65, 0x93, 0xdb, 
0xca, 0x54, 0xcb, 0x2d, 0x5c, 0x8a, 0x66, - 0x16, 0x0e, 0xc1, 0x39, 0x2b, 0xe0, 0x9d, 0x1d, 0x40, 0xba, 0xd1, 0x35, 0xfb, 0xba, 0xa1, 0xc9, - 0x8a, 0x46, 0x0c, 0x57, 0xd6, 0x55, 0x67, 0x66, 0x82, 0x29, 0x51, 0xa2, 0x72, 0x78, 0x18, 0xea, - 0xbb, 0xbb, 0xad, 0xe5, 0xc5, 0xa9, 0xa3, 0xa7, 0xb3, 0xe5, 0x96, 0x80, 0x37, 0x28, 0xba, 0xb5, - 0xec, 0xe0, 0xb2, 0x3e, 0x42, 0x51, 0x1d, 0xe4, 0xc1, 0x25, 0x72, 0x40, 0xba, 0x1e, 0x5d, 0x42, - 0x76, 0x5c, 0xc5, 0xf5, 0x1c, 0x59, 0x25, 0x8e, 0xab, 0x1b, 0x0a, 0xd7, 0x33, 0xc5, 0xe4, 0xdf, - 0x8c, 0xd6, 0xb3, 0xde, 0xf4, 0x79, 0x77, 0x18, 0xeb, 0xf2, 0x90, 0x13, 0x5f, 0x20, 0x63, 0xe7, - 0x9c, 0xca, 0x3e, 0x54, 0xc6, 0xb3, 0xa2, 0x97, 0x20, 0xaf, 0xd9, 0x56, 0x57, 0x56, 0x54, 0xd5, - 0x26, 0x8e, 0xc3, 0x62, 0x92, 0xc5, 0x39, 0x4a, 0x6b, 0x70, 0x12, 0xba, 0x0a, 0x45, 0xc7, 0xe9, - 0xc9, 0xae, 0x62, 0x6b, 0xc4, 0x35, 0x94, 0x3e, 0x61, 0x19, 0x93, 0xc5, 0x05, 0xc7, 0xe9, 0x75, - 0x06, 0xc4, 0xf5, 0x64, 0x26, 0x51, 0x4e, 0xd6, 0x0e, 0x21, 0x1f, 0x0c, 0x09, 0x2a, 0x42, 0x5c, - 0x57, 0x99, 0xd4, 0x24, 0x8e, 0xeb, 0xaa, 0x1f, 0xfa, 0xf8, 0x89, 0xa1, 0xbf, 0xe1, 0x87, 0x3e, - 0xc1, 0xbc, 0x52, 0x89, 0xf6, 0xca, 0x96, 0xa9, 0x12, 0x11, 0xf6, 0xda, 0xff, 0x49, 0x90, 0x58, - 0x6e, 0xac, 0xa2, 0x5b, 0x3e, 0xa7, 0xc4, 0x38, 0x2f, 0x45, 0x2e, 0x42, 0xff, 0x05, 0x98, 0x2b, - 0x3a, 0xa4, 0x05, 0x25, 0xa4, 0x32, 0xb5, 0xdf, 0xb4, 0x5d, 0xa2, 0xca, 0x96, 0x62, 0x13, 0xc3, - 0xa5, 0x09, 0x95, 0x98, 0x4b, 0xe2, 0x02, 0xa7, 0x6e, 0x73, 0x22, 0xba, 0x06, 0x25, 0x01, 0xeb, - 0x3e, 0xd0, 0x7b, 0xaa, 0x4d, 0x0c, 0xa6, 0x7a, 0x12, 0x0b, 0xee, 0x25, 0x41, 0xad, 0xad, 0x40, - 0xc6, 0x57, 0x3d, 0xb4, 0xd6, 0x75, 0x88, 0x9b, 0x96, 0xf0, 0x4e, 0x84, 0xc9, 0x6d, 0x8b, 0xd8, - 0x8a, 0x6b, 0xda, 0x38, 0x6e, 0x5a, 0xb5, 0xff, 0xc8, 0x42, 0xc6, 0x27, 0xa0, 0xbf, 0x83, 0xb4, - 0x69, 0xc9, 0x74, 0xc7, 0x33, 0x69, 0xc5, 0xa8, 0xbd, 0xe2, 0x83, 0x3b, 0x87, 0x16, 0xc1, 0x29, - 0xd3, 0xa2, 0x7f, 0xd1, 0x06, 0x14, 0xfa, 0xa4, 0x2f, 0x3b, 0xa6, 0x67, 0x77, 0x89, 0x3c, 
0x58, - 0xfc, 0x6f, 0xc2, 0xec, 0x9b, 0xa4, 0x6f, 0xda, 0x87, 0x3b, 0x0c, 0xe8, 0x8b, 0x5a, 0x8b, 0xe1, - 0x5c, 0x9f, 0xf4, 0x7d, 0x22, 0xba, 0x03, 0xa9, 0xbe, 0x62, 0x51, 0x31, 0x89, 0x71, 0x9b, 0x6e, - 0x53, 0xb1, 0x02, 0xdc, 0x13, 0x7d, 0x3a, 0x44, 0xf7, 0x21, 0xa5, 0x68, 0x1a, 0xe5, 0xe3, 0x9b, - 0xf5, 0xe5, 0x30, 0x5f, 0x43, 0xd3, 0x6c, 0xa2, 0x29, 0x6e, 0x70, 0xed, 0x09, 0x45, 0xd3, 0xda, - 0x16, 0x5a, 0x81, 0x1c, 0xb3, 0x41, 0x37, 0x1e, 0x52, 0x11, 0x13, 0x4c, 0xc4, 0x95, 0xb1, 0x16, - 0xe8, 0xc6, 0xc3, 0x80, 0x8c, 0x2c, 0xd5, 0x9f, 0x91, 0xd0, 0xdb, 0x90, 0xdd, 0xd7, 0x7b, 0x2e, - 0xb1, 0xa9, 0x94, 0x14, 0x93, 0x72, 0x39, 0x2c, 0x65, 0x85, 0x41, 0x02, 0x12, 0x32, 0xfb, 0x82, - 0x82, 0xee, 0x43, 0xa6, 0xa7, 0xf7, 0x75, 0x97, 0xf2, 0xa7, 0x19, 0xff, 0x6c, 0x98, 0x7f, 0x83, - 0x22, 0x02, 0xec, 0xe9, 0x1e, 0x27, 0x50, 0x6e, 0xcf, 0xa0, 0xc5, 0xc1, 0xb4, 0x66, 0x32, 0xe3, - 0xb8, 0x77, 0x29, 0x22, 0xc8, 0xed, 0x71, 0x02, 0xfa, 0x17, 0x28, 0xb2, 0x9d, 0x3c, 0x8c, 0x64, - 0x76, 0x9c, 0x1f, 0x56, 0xf1, 0xf6, 0xd2, 0x68, 0x1c, 0x17, 0xcb, 0x47, 0x4f, 0x67, 0xf3, 0x41, - 0xfa, 0x5a, 0x0c, 0xb3, 0xca, 0x30, 0x08, 0xed, 0xfb, 0xa2, 0x52, 0xf8, 0x5e, 0x7e, 0xce, 0x0d, - 0xac, 0x8d, 0x11, 0x1f, 0x70, 0xf2, 0x62, 0xf1, 0xe8, 0xe9, 0x2c, 0x0c, 0xa9, 0x6b, 0x31, 0x0c, - 0x4c, 0x34, 0xf7, 0xfa, 0x1b, 0x90, 0xfe, 0xc8, 0xd4, 0x99, 0xd5, 0x39, 0x26, 0x32, 0x22, 0x75, - 0xd7, 0x4d, 0x3d, 0x68, 0x74, 0xea, 0x23, 0x36, 0x46, 0x1b, 0x50, 0xf4, 0x54, 0x77, 0x3f, 0x60, - 0x73, 0x7e, 0x9c, 0xcd, 0xbb, 0xcb, 0x9d, 0x95, 0x50, 0xee, 0xe6, 0x29, 0xf7, 0xc0, 0xc2, 0x36, - 0x94, 0x48, 0xdf, 0x72, 0x0f, 0x03, 0xe2, 0x0a, 0x4c, 0xdc, 0xd5, 0xb0, 0xb8, 0x26, 0x05, 0x86, - 0xe4, 0x15, 0x48, 0x90, 0x8c, 0x3e, 0x84, 0xbc, 0xe9, 0x92, 0xde, 0xc0, 0x65, 0x45, 0x26, 0x6d, - 0x2e, 0x62, 0x67, 0x76, 0x48, 0xaf, 0x79, 0x60, 0x99, 0xb6, 0x1b, 0xf6, 0x1b, 0x9d, 0x1b, 0xfa, - 0x8d, 0xca, 0x13, 0x7e, 0x6b, 0x40, 0xba, 0x6b, 0x1a, 0x2e, 0x39, 0x70, 0x67, 0x4a, 0xac, 0xd2, - 0x5d, 0x1b, 0xbf, 0xe5, 0xeb, 
0x4b, 0x1c, 0xd9, 0x34, 0x5c, 0xfb, 0x10, 0xfb, 0x7c, 0x95, 0x7b, - 0x90, 0x0f, 0x4e, 0xa0, 0x32, 0x24, 0x1e, 0x92, 0x43, 0x71, 0x08, 0xd0, 0x9f, 0x68, 0x0a, 0x26, - 0x1e, 0x29, 0x3d, 0xcf, 0xaf, 0xf9, 0x7c, 0x70, 0x2f, 0x7e, 0x57, 0x5a, 0x4c, 0xd2, 0x52, 0x55, - 0xfb, 0x75, 0x1c, 0xa6, 0xa2, 0x0a, 0x03, 0x42, 0x90, 0x64, 0x67, 0x05, 0x97, 0xc5, 0x7e, 0xa3, - 0x59, 0xc8, 0x75, 0xcd, 0x9e, 0xd7, 0x37, 0x64, 0x5d, 0x3d, 0xe0, 0x87, 0x7a, 0x02, 0x03, 0x27, - 0xb5, 0xd4, 0x03, 0x87, 0x9e, 0x46, 0x02, 0x40, 0xf1, 0xbc, 0xf6, 0x67, 0xb1, 0x60, 0xda, 0xa2, - 0x24, 0xf4, 0xfa, 0x00, 0xc2, 0xda, 0x1b, 0x56, 0x8b, 0x8b, 0x0b, 0x88, 0x9a, 0xce, 0xfb, 0x9d, - 0x65, 0xc5, 0x55, 0x58, 0x85, 0x13, 0x6c, 0xf4, 0xb7, 0x83, 0xee, 0x01, 0x38, 0xae, 0x62, 0xbb, - 0xb2, 0xab, 0xf7, 0x89, 0xa8, 0x10, 0x17, 0xea, 0xbc, 0xf7, 0xaa, 0xfb, 0xbd, 0x57, 0xbd, 0x65, - 0xb8, 0x77, 0x6e, 0xbf, 0x47, 0x4d, 0xc4, 0x59, 0x06, 0xef, 0xe8, 0x7d, 0xda, 0xf7, 0x64, 0x1d, - 0x97, 0x56, 0x57, 0xca, 0x9a, 0x3a, 0x99, 0x35, 0x43, 0xd1, 0x8c, 0xf3, 0x2c, 0xa4, 0x58, 0x77, - 0xe4, 0xb2, 0x6a, 0x90, 0xc5, 0x62, 0x84, 0x2e, 0x52, 0x89, 0x36, 0x51, 0x68, 0x7f, 0xc0, 0xb6, - 0x7a, 0x06, 0x0f, 0x09, 0xb5, 0xaf, 0x24, 0x40, 0xe1, 0x52, 0x15, 0xe9, 0xd1, 0xe3, 0xde, 0x88, - 0x9f, 0xce, 0x1b, 0xa7, 0xf0, 0xf3, 0x3a, 0x4c, 0x0b, 0x88, 0x43, 0xfa, 0x8a, 0xe1, 0xea, 0xdd, - 0x11, 0x87, 0x9f, 0x1d, 0x2e, 0xb1, 0x23, 0xe6, 0xd9, 0x32, 0x67, 0x38, 0x53, 0x90, 0xe6, 0xd4, - 0x0c, 0x40, 0xe1, 0x92, 0x13, 0xd2, 0x5d, 0xfa, 0x61, 0xba, 0xc7, 0x43, 0xba, 0xd7, 0xbe, 0x4a, - 0x42, 0xf9, 0x78, 0x11, 0x62, 0x7d, 0xed, 0x48, 0x93, 0xe3, 0x0f, 0xd1, 0xdd, 0xd1, 0xca, 0xa9, - 0xab, 0xec, 0xf0, 0x4a, 0x1e, 0xaf, 0x89, 0xad, 0xe5, 0xd1, 0x9a, 0xd8, 0x52, 0xd1, 0x0e, 0xe4, - 0x45, 0x37, 0x3c, 0x6c, 0x82, 0x73, 0x0b, 0xf5, 0x93, 0x4b, 0x62, 0x1d, 0x13, 0xc7, 0xeb, 0xb9, - 0xac, 0x3b, 0xa6, 0x67, 0x28, 0x97, 0xc2, 0x86, 0x48, 0x03, 0xd4, 0x35, 0x0d, 0x83, 0x74, 0x5d, - 0x7e, 0x16, 0xf0, 0xe6, 0x90, 0xa7, 0xec, 0xdd, 0x53, 0x88, 0xa6, 
0x84, 0xa5, 0x81, 0x00, 0xbf, - 0xbf, 0x9d, 0xec, 0x1e, 0x27, 0x55, 0x7e, 0x23, 0x41, 0x2e, 0xa0, 0x07, 0xba, 0x04, 0xc0, 0xcc, - 0x90, 0x03, 0x69, 0x96, 0x65, 0x94, 0xad, 0xbf, 0x9a, 0x5c, 0xab, 0xfc, 0x3d, 0x4c, 0x47, 0x3a, - 0x20, 0xa2, 0x8d, 0x95, 0x22, 0xda, 0xd8, 0xc5, 0x02, 0xe4, 0x02, 0x4d, 0xf9, 0x7a, 0x32, 0x13, - 0x2f, 0x27, 0x6a, 0x8f, 0x20, 0x17, 0x68, 0x5b, 0xd0, 0x32, 0xe4, 0xc8, 0x81, 0x45, 0x73, 0x87, - 0x85, 0x86, 0xf7, 0x99, 0x11, 0x07, 0xe1, 0x4e, 0x57, 0xe9, 0x29, 0x76, 0x73, 0x00, 0xc5, 0x41, - 0xb6, 0xd3, 0x24, 0xf2, 0x8f, 0xe3, 0x30, 0x19, 0xea, 0x7b, 0xd0, 0x5b, 0x90, 0x62, 0x65, 0xd8, - 0x5f, 0xf9, 0xea, 0x0b, 0x9a, 0xa5, 0xc0, 0xe2, 0x82, 0x09, 0xdd, 0x80, 0x94, 0x66, 0x9b, 0x9e, - 0xe5, 0xdf, 0xaa, 0x66, 0xc2, 0xec, 0x4b, 0x4c, 0x07, 0x2c, 0x70, 0xb4, 0x6e, 0xb3, 0x5f, 0x23, - 0x11, 0x04, 0x46, 0xe2, 0x01, 0x9c, 0x85, 0x1c, 0x13, 0x2e, 0x00, 0x49, 0x0e, 0x60, 0x24, 0x0e, - 0xa8, 0x40, 0xe6, 0xb1, 0x6e, 0xa8, 0xe6, 0x63, 0xa2, 0xb2, 0x4c, 0xce, 0xe0, 0xc1, 0x98, 0x32, - 0x5b, 0x8a, 0xed, 0xea, 0x4a, 0x4f, 0x56, 0x34, 0x8d, 0x15, 0xd8, 0x0c, 0x06, 0x41, 0x6a, 0x68, - 0x1a, 0x7a, 0x05, 0xca, 0xfb, 0xba, 0xa1, 0xf4, 0xf4, 0x4f, 0x88, 0x6c, 0xb3, 0x7c, 0x75, 0x58, - 0x3d, 0xcd, 0xe0, 0x92, 0x4f, 0xe7, 0x69, 0xec, 0xd4, 0xfe, 0x53, 0x82, 0xe2, 0x68, 0x7f, 0x86, - 0x16, 0x01, 0x86, 0x5e, 0x17, 0x77, 0xce, 0xd3, 0xc4, 0x2a, 0xc0, 0x85, 0x16, 0xe8, 0x51, 0x4b, - 0x5d, 0x72, 0xb2, 0xcf, 0x7c, 0x60, 0xed, 0x53, 0x09, 0x0a, 0x23, 0xad, 0x1e, 0x3d, 0x4b, 0x59, - 0xab, 0xc7, 0x94, 0x48, 0x60, 0x3e, 0xf8, 0x21, 0xb2, 0x69, 0x2e, 0x2b, 0x7b, 0xa6, 0xcd, 0x77, - 0xab, 0x63, 0x77, 0x1d, 0x71, 0xd5, 0x28, 0x0c, 0xa8, 0x3b, 0x76, 0xd7, 0xa9, 0x3d, 0x97, 0xa0, - 0x30, 0xd2, 0x2f, 0x86, 0x72, 0x4e, 0x0a, 0x6f, 0xc6, 0xf7, 0xa0, 0x24, 0x20, 0x7d, 0xc5, 0xb2, - 0x74, 0x43, 0xf3, 0xf5, 0x7a, 0xed, 0x84, 0x66, 0x54, 0x68, 0xb9, 0xc9, 0xb9, 0x70, 0xb1, 0x1b, - 0x1c, 0x3a, 0xe8, 0x0a, 0x14, 0x07, 0x4f, 0x06, 0x7b, 0x8a, 0xdb, 0x7d, 0xc0, 0xab, 0x2c, 0xce, - 0xdb, 
0xfc, 0xa5, 0x60, 0x91, 0xd2, 0x2a, 0x77, 0xa0, 0x30, 0x22, 0x86, 0x9a, 0xea, 0xf7, 0x0c, - 0x86, 0x4a, 0x0e, 0x84, 0xce, 0x09, 0x5c, 0x10, 0x6d, 0x03, 0x27, 0xd6, 0xbe, 0x4c, 0x42, 0x3e, - 0xd8, 0x24, 0xa2, 0x37, 0x21, 0x19, 0xb8, 0x0d, 0x5d, 0x7b, 0x71, 0x4b, 0xc9, 0x06, 0xac, 0xa6, - 0x30, 0x26, 0xa4, 0xc0, 0x19, 0xf2, 0xb1, 0xa7, 0xf4, 0x74, 0xf7, 0x50, 0xee, 0x9a, 0x86, 0xaa, - 0xf3, 0x1a, 0xcc, 0xfd, 0x70, 0xe3, 0x04, 0x59, 0x4d, 0xc1, 0xb9, 0xe4, 0x33, 0x62, 0x44, 0x8e, - 0x93, 0x1c, 0x84, 0xa1, 0x28, 0x8e, 0x0e, 0x3f, 0xfa, 0xfc, 0xa2, 0xfb, 0xb7, 0x27, 0x48, 0xe7, - 0xd7, 0x4d, 0x91, 0x10, 0x05, 0x2e, 0x62, 0x49, 0xa4, 0xc5, 0xf1, 0xe8, 0x26, 0xc3, 0xd1, 0x0d, - 0x47, 0x61, 0x22, 0x22, 0x0a, 0x7d, 0x98, 0x0c, 0x59, 0x81, 0xae, 0xc3, 0x64, 0x8f, 0xec, 0xfb, - 0xfa, 0xf2, 0x70, 0x88, 0xab, 0x6b, 0x89, 0x4e, 0x2c, 0x0d, 0x03, 0x82, 0x5e, 0x05, 0x64, 0xeb, - 0xda, 0x83, 0x63, 0xe0, 0x38, 0x03, 0x97, 0xd9, 0x4c, 0x00, 0x5d, 0xe9, 0x40, 0x3e, 0x68, 0x16, - 0xb5, 0x83, 0x5f, 0xb5, 0x47, 0x16, 0xc9, 0x71, 0x1a, 0x5f, 0x60, 0x68, 0x6a, 0x50, 0x74, 0x2e, - 0x90, 0x14, 0xb5, 0xd7, 0x21, 0xe3, 0x87, 0x15, 0x65, 0x61, 0xa2, 0xb5, 0xb5, 0xd5, 0xc4, 0xe5, - 0x18, 0x2a, 0x02, 0x6c, 0x34, 0x57, 0x3a, 0x72, 0x7b, 0xb7, 0xd3, 0xc4, 0x65, 0x89, 0x8e, 0x57, - 0x76, 0x37, 0x36, 0xc4, 0x38, 0x51, 0xdb, 0x07, 0x14, 0xbe, 0x2b, 0x44, 0x36, 0x5f, 0xf7, 0x01, - 0x14, 0x5b, 0x93, 0x45, 0x2d, 0x8e, 0x8f, 0x7b, 0x6d, 0xe0, 0x95, 0x45, 0x74, 0x95, 0x8a, 0xad, - 0xb1, 0x5f, 0x4e, 0xcd, 0x84, 0x33, 0x11, 0x97, 0x88, 0xd3, 0xec, 0xd0, 0x1f, 0x76, 0x10, 0xd7, - 0x7e, 0x14, 0x87, 0x34, 0xbd, 0x4c, 0x6c, 0x98, 0x1a, 0x7a, 0x1b, 0x40, 0x71, 0x5d, 0x5b, 0xdf, - 0xf3, 0xdc, 0xc1, 0x31, 0x32, 0x1b, 0x7d, 0x2f, 0x69, 0xf8, 0x38, 0x1c, 0x60, 0xa1, 0xc9, 0x40, - 0xdb, 0xe1, 0x70, 0x7c, 0x13, 0xb8, 0x44, 0x27, 0x82, 0xc9, 0xf0, 0x26, 0x54, 0xcc, 0x3d, 0x87, - 0xd8, 0x8f, 0x88, 0x2a, 0x87, 0x99, 0x12, 0x8c, 0xe9, 0x9c, 0x8f, 0xe8, 0x1c, 0x63, 0xbe, 0x06, - 0x25, 0x87, 0x3c, 0x22, 0x36, 0xdd, 0x8a, 
0x86, 0xd7, 0xdf, 0x23, 0xb6, 0x78, 0x6a, 0x2c, 0xfa, - 0xe4, 0x2d, 0x46, 0x45, 0x2f, 0x43, 0x61, 0x00, 0x64, 0x97, 0xa2, 0x09, 0x16, 0xaa, 0xbc, 0x4f, - 0xec, 0x90, 0x03, 0x97, 0xaa, 0xbd, 0x67, 0xaa, 0x87, 0xa3, 0x1a, 0xa4, 0xb8, 0xda, 0x74, 0x22, - 0xb0, 0x72, 0xed, 0xb3, 0x24, 0x64, 0xd8, 0xe5, 0xcb, 0x52, 0x68, 0x4a, 0xe6, 0x68, 0x3c, 0x64, - 0xc7, 0xb5, 0x69, 0xcf, 0xce, 0xd2, 0x80, 0xde, 0xc7, 0x28, 0x71, 0x87, 0xd1, 0xd0, 0xab, 0x30, - 0xc9, 0x20, 0x61, 0x97, 0xac, 0xc5, 0x70, 0x89, 0x4e, 0x05, 0xed, 0x1a, 0x8d, 0x40, 0xe2, 0xfb, - 0x47, 0x60, 0x19, 0xa6, 0x5d, 0x5b, 0x61, 0xfd, 0xea, 0xe8, 0x92, 0xcc, 0x3d, 0x8b, 0x93, 0x47, - 0x4f, 0x67, 0x0b, 0x1d, 0x0a, 0x68, 0x2d, 0x8b, 0x6a, 0x81, 0x18, 0xbe, 0xa5, 0x06, 0xd5, 0x68, - 0xc0, 0x94, 0x63, 0x29, 0x46, 0x48, 0xc8, 0x04, 0x13, 0xc2, 0x3a, 0x60, 0x6a, 0xff, 0x40, 0xc6, - 0x24, 0x45, 0x8f, 0x8a, 0xe8, 0xc0, 0x05, 0xb1, 0x5b, 0x23, 0x25, 0x31, 0xef, 0x2e, 0x9e, 0x3d, - 0x7a, 0x3a, 0x8b, 0xf8, 0x26, 0x1f, 0x91, 0x77, 0xce, 0x1a, 0xd2, 0x46, 0xa4, 0xbe, 0x0e, 0xe7, - 0x86, 0x17, 0xb6, 0x51, 0x89, 0x69, 0x16, 0xaf, 0xa9, 0xc1, 0x05, 0x2d, 0xc8, 0x76, 0x13, 0xa6, - 0x89, 0x11, 0x95, 0x66, 0x19, 0xc6, 0x84, 0x88, 0x11, 0xca, 0xb0, 0x4b, 0x00, 0x0f, 0x75, 0x43, - 0xe5, 0xfb, 0x98, 0x3d, 0x9a, 0x24, 0x70, 0x96, 0x52, 0xd8, 0x46, 0x5d, 0x4c, 0xf1, 0x9d, 0x5f, - 0xfb, 0x57, 0x28, 0xd1, 0x60, 0x6c, 0x12, 0xd7, 0xd6, 0xbb, 0xab, 0x8a, 0xa7, 0x11, 0x54, 0x07, - 0xb4, 0xdf, 0x33, 0x95, 0x88, 0x92, 0x48, 0x43, 0x5e, 0x66, 0x73, 0xc1, 0x95, 0xae, 0x43, 0x59, - 0x37, 0xdc, 0xe8, 0x04, 0x29, 0xea, 0x46, 0x10, 0xbb, 0x58, 0x84, 0x3c, 0x6f, 0xa9, 0x38, 0xba, - 0xf6, 0xff, 0x71, 0x98, 0x1c, 0xae, 0xbf, 0xe3, 0xf5, 0xfb, 0x8a, 0x7d, 0x48, 0xeb, 0x6c, 0xd7, - 0xf4, 0x8c, 0x28, 0x0d, 0x70, 0x99, 0xcd, 0x04, 0xd7, 0x9f, 0x83, 0xb2, 0xe3, 0xf5, 0xa3, 0xf6, - 0x6c, 0xd1, 0xf1, 0xfa, 0x41, 0xe4, 0x87, 0x50, 0xfa, 0xd8, 0xa3, 0x5d, 0x75, 0x8f, 0xf8, 0xf5, - 0x8d, 0xa7, 0xe8, 0xad, 0xe8, 0x14, 0x1d, 0xd1, 0xaa, 0xce, 0x1c, 0xd7, 0x70, 
0xff, 0x41, 0x48, - 0xc0, 0x45, 0x5f, 0x16, 0x2f, 0x7d, 0x95, 0x7f, 0x86, 0xd2, 0x31, 0x08, 0x6d, 0x10, 0x7d, 0x10, - 0x53, 0x5f, 0xc2, 0x83, 0x31, 0x35, 0x32, 0xe8, 0x8a, 0x11, 0xc5, 0xcb, 0x6c, 0x26, 0xb8, 0x6d, - 0xbf, 0x88, 0x43, 0x61, 0x64, 0xd7, 0x44, 0xd6, 0xee, 0x77, 0x20, 0xc5, 0xa5, 0x8d, 0x7f, 0xef, - 0x1c, 0x11, 0x22, 0x9a, 0x9b, 0xb5, 0x18, 0x16, 0x7c, 0xe8, 0x65, 0xc8, 0xf3, 0x62, 0x20, 0x12, - 0x27, 0x21, 0x4a, 0x42, 0x8e, 0x53, 0x99, 0x81, 0x95, 0xff, 0x95, 0x20, 0x25, 0x0e, 0xb5, 0x5b, - 0x83, 0xc7, 0x8f, 0x40, 0x5f, 0x12, 0x55, 0xb4, 0x61, 0x58, 0xb4, 0x23, 0x8f, 0xb9, 0xc4, 0xc8, - 0x31, 0x87, 0xee, 0xc2, 0xf9, 0xae, 0x62, 0xc8, 0x7b, 0x44, 0xfe, 0xc8, 0x31, 0x0d, 0x99, 0x18, - 0x5d, 0x53, 0x25, 0xaa, 0xac, 0xd8, 0xb6, 0x72, 0x28, 0xbe, 0xe0, 0x4c, 0x77, 0x15, 0x63, 0x91, - 0xac, 0x3b, 0xa6, 0xd1, 0xe4, 0xb3, 0x0d, 0x3a, 0xb9, 0x98, 0x16, 0x6f, 0x3b, 0xb5, 0x2f, 0xe3, - 0x00, 0xc3, 0x28, 0x46, 0xfa, 0xeb, 0x32, 0xbb, 0x16, 0x75, 0x6d, 0x9d, 0xdd, 0xa6, 0xc4, 0x6b, - 0x50, 0x90, 0x44, 0xb9, 0x3c, 0x43, 0x77, 0xb9, 0x1f, 0x30, 0xfb, 0x7d, 0xac, 0xc8, 0x25, 0xff, - 0x4c, 0xc7, 0xcc, 0x44, 0xf4, 0x31, 0xf3, 0x06, 0x4c, 0x68, 0x74, 0x5b, 0xce, 0x10, 0x16, 0xd1, - 0x97, 0x5e, 0x94, 0xa9, 0x6c, 0xff, 0xae, 0xc5, 0x30, 0xe7, 0x40, 0x6f, 0x43, 0xda, 0xe1, 0xb9, - 0x3b, 0xb3, 0x3f, 0xee, 0xfd, 0x39, 0x94, 0xe6, 0x6b, 0x31, 0xec, 0x73, 0xd1, 0x22, 0xa1, 0x2a, - 0xae, 0x52, 0xfb, 0x9d, 0x04, 0x88, 0x3d, 0xe6, 0x19, 0xaa, 0x65, 0xb2, 0x1d, 0x6d, 0xec, 0xeb, - 0x1a, 0x3a, 0x0f, 0x09, 0xcf, 0xee, 0x71, 0x87, 0x2e, 0xa6, 0x8f, 0x9e, 0xce, 0x26, 0x76, 0xf1, - 0x06, 0xa6, 0x34, 0xf4, 0x2e, 0xa4, 0x1f, 0x10, 0x45, 0x25, 0xb6, 0xdf, 0x41, 0xdc, 0x1c, 0xf3, - 0x3c, 0x38, 0x22, 0xb1, 0xbe, 0xc6, 0x79, 0xc4, 0x7b, 0x9e, 0x90, 0x40, 0x77, 0x91, 0x6e, 0x38, - 0xa4, 0xeb, 0xd9, 0xfe, 0xc7, 0xbb, 0xc1, 0x18, 0xcd, 0x40, 0x9a, 0x7a, 0xcc, 0xf4, 0x5c, 0x71, - 0x80, 0xfa, 0xc3, 0xca, 0x3d, 0xc8, 0x07, 0xc5, 0x7d, 0x9f, 0x57, 0xc0, 0x5a, 0x1b, 0xf2, 0x54, - 0x3b, 0x4c, 0xf8, 
0xe3, 0xc9, 0x9f, 0xdc, 0x58, 0xd4, 0x7e, 0x1a, 0x87, 0xb3, 0xd1, 0xcf, 0xa1, - 0x68, 0x13, 0x4a, 0x44, 0x78, 0x81, 0x76, 0xe5, 0xfb, 0xba, 0xff, 0x09, 0xf1, 0xca, 0x69, 0x5c, - 0x86, 0x8b, 0x64, 0x34, 0x28, 0xf7, 0x20, 0x63, 0x0b, 0xb5, 0x45, 0x11, 0xa8, 0x46, 0xcb, 0xf1, - 0x8d, 0xc3, 0x03, 0x3c, 0xba, 0x03, 0xe9, 0x3e, 0xcb, 0x05, 0xbf, 0x2e, 0x5e, 0x7c, 0x51, 0xc2, - 0x60, 0x1f, 0x8c, 0x6e, 0xc0, 0x04, 0x3d, 0x24, 0xfd, 0xbd, 0x50, 0x89, 0xe6, 0xa2, 0xa7, 0x21, - 0xe6, 0x40, 0xf4, 0x1a, 0x24, 0x7b, 0xa6, 0xe6, 0x7f, 0x7c, 0x3c, 0x1f, 0xcd, 0xb0, 0x61, 0x6a, - 0x98, 0xc1, 0x6a, 0x3f, 0x93, 0xa0, 0x7c, 0xfc, 0x2a, 0x8b, 0xde, 0x84, 0x4c, 0xd7, 0x34, 0x1c, - 0x57, 0x31, 0x5c, 0xe1, 0xb1, 0x17, 0xb7, 0xa9, 0x6b, 0x31, 0x3c, 0x60, 0x40, 0x0b, 0xc7, 0x2a, - 0xe5, 0xd8, 0xeb, 0x69, 0xa0, 0x36, 0x2e, 0x40, 0x72, 0xdf, 0x33, 0xba, 0xe2, 0x23, 0xd0, 0xc5, - 0x71, 0x8b, 0xad, 0x78, 0x46, 0x77, 0x2d, 0x86, 0x19, 0x76, 0x58, 0x8d, 0x7e, 0x1e, 0x87, 0x5c, - 0x40, 0x19, 0x34, 0x0f, 0x59, 0xba, 0xb7, 0x4e, 0x2a, 0x9b, 0x19, 0x55, 0xfc, 0x42, 0xb3, 0x00, - 0x7b, 0xa6, 0xd9, 0x93, 0x87, 0x29, 0x9b, 0x59, 0x8b, 0xe1, 0x2c, 0xa5, 0x71, 0x89, 0x2f, 0x41, - 0x4e, 0x37, 0xdc, 0x3b, 0xb7, 0x03, 0x95, 0x9b, 0x1e, 0xc1, 0xa0, 0x0f, 0xde, 0x70, 0xd1, 0x55, - 0x28, 0xb0, 0xe3, 0x7b, 0x00, 0xa2, 0x7b, 0x46, 0x5a, 0x8b, 0xe1, 0xbc, 0x20, 0x73, 0xd8, 0xf1, - 0x43, 0x60, 0x22, 0xe2, 0x10, 0x40, 0x73, 0xc0, 0x6a, 0xd5, 0x9d, 0xdb, 0xb2, 0xe1, 0x08, 0x5c, - 0x4a, 0x2c, 0x59, 0xe0, 0x13, 0x5b, 0x0e, 0x47, 0xde, 0x85, 0x82, 0xa7, 0x1b, 0xee, 0xcd, 0x85, - 0xbb, 0x02, 0xc7, 0xbf, 0xb1, 0x4c, 0x0e, 0xcd, 0xdd, 0x6d, 0xb1, 0x69, 0xf6, 0xed, 0x82, 0x23, - 0x79, 0x97, 0xe2, 0x7b, 0x6f, 0x3d, 0x99, 0xc9, 0x94, 0xb3, 0xb5, 0x6f, 0x24, 0x80, 0xa1, 0x8f, - 0x23, 0x2b, 0xfa, 0x3d, 0xc8, 0xea, 0x86, 0xee, 0xca, 0x8a, 0xad, 0x9d, 0xf2, 0xf2, 0x92, 0xa1, - 0xf8, 0x86, 0xad, 0x39, 0xe8, 0x0e, 0x24, 0x19, 0x5b, 0xe2, 0xd4, 0x2f, 0x5f, 0x0c, 0x2f, 0x3e, - 0x77, 0xf2, 0xf2, 0x13, 0xd7, 0x55, 0x74, 0x0f, 0x4a, 
0x94, 0x2e, 0x0f, 0xe2, 0xcb, 0xf3, 0x3c, - 0x3a, 0xc0, 0x05, 0x0a, 0xf5, 0x47, 0x4e, 0xed, 0xf7, 0x71, 0x38, 0x13, 0xf1, 0xcc, 0x35, 0xb0, - 0x35, 0x31, 0xce, 0xd6, 0xe4, 0xf7, 0xb3, 0xf5, 0x2d, 0x61, 0x2b, 0xdf, 0x80, 0xaf, 0x9c, 0xea, - 0xad, 0xad, 0xde, 0xb0, 0xb5, 0x11, 0x93, 0x53, 0x2f, 0x32, 0x39, 0x7d, 0x4a, 0x93, 0x2b, 0xff, - 0x06, 0x89, 0x86, 0xad, 0xfd, 0xc5, 0xb7, 0xf3, 0x70, 0x6b, 0x2e, 0x0c, 0xba, 0x19, 0xea, 0x65, - 0x53, 0x25, 0xe2, 0x6a, 0xce, 0x7e, 0xd3, 0x53, 0x22, 0x78, 0x19, 0xe7, 0x83, 0xeb, 0xbf, 0x8d, - 0x43, 0x3e, 0xf8, 0xe5, 0x19, 0x9d, 0x87, 0xe9, 0xf6, 0x76, 0x13, 0x37, 0x3a, 0x6d, 0x2c, 0x77, - 0x3e, 0xd8, 0x6e, 0xca, 0xbb, 0x5b, 0xef, 0x6e, 0xb5, 0xdf, 0xdf, 0x2a, 0xc7, 0xd0, 0x05, 0x38, - 0xbb, 0xd9, 0xdc, 0x6c, 0xe3, 0x0f, 0xe4, 0x9d, 0xf6, 0x2e, 0x5e, 0x6a, 0xca, 0x3e, 0xb0, 0xfc, - 0x3c, 0x8d, 0xce, 0xc3, 0xd4, 0x2a, 0xde, 0x5e, 0x0a, 0x4d, 0xfd, 0x2a, 0x43, 0xa7, 0xe8, 0x9d, - 0x3d, 0x34, 0xf5, 0x45, 0x16, 0x55, 0x60, 0xba, 0xb9, 0xb9, 0xdd, 0x09, 0x4b, 0xfc, 0x6f, 0x40, - 0x93, 0x90, 0xdf, 0x6c, 0x6c, 0x0f, 0x49, 0x4f, 0x4a, 0xe8, 0x1c, 0xa0, 0xc6, 0xea, 0x2a, 0x6e, - 0xae, 0x36, 0x3a, 0x01, 0xec, 0x4f, 0xca, 0x68, 0x0a, 0x4a, 0x2b, 0xad, 0x8d, 0x4e, 0x13, 0x0f, - 0xa9, 0xff, 0x33, 0x89, 0xce, 0x40, 0x71, 0xa3, 0xb5, 0xd9, 0xea, 0x0c, 0x89, 0x7f, 0x60, 0xc4, - 0xdd, 0xad, 0x56, 0x7b, 0x6b, 0x48, 0xfc, 0x06, 0x21, 0x04, 0x85, 0xf5, 0x76, 0x2b, 0x40, 0xfb, - 0xc5, 0x19, 0xaa, 0xb6, 0x6f, 0x6e, 0x6b, 0xeb, 0xdd, 0xe1, 0xd4, 0xe7, 0x2b, 0x54, 0x0f, 0x6e, - 0xec, 0xc8, 0xc4, 0x67, 0xab, 0xa8, 0x0a, 0xe7, 0xdb, 0x9d, 0xe6, 0x86, 0xdc, 0xfc, 0xc7, 0xed, - 0x36, 0xee, 0x1c, 0x9b, 0xff, 0x6e, 0x75, 0xf1, 0xfe, 0x93, 0x67, 0xd5, 0xd8, 0xd7, 0xcf, 0xaa, - 0xb1, 0xef, 0x9e, 0x55, 0xa5, 0x4f, 0x8f, 0xaa, 0xd2, 0xe7, 0x47, 0x55, 0xe9, 0x97, 0x47, 0x55, - 0xe9, 0xc9, 0x51, 0x55, 0xfa, 0xe6, 0xa8, 0x2a, 0x3d, 0x3f, 0xaa, 0xc6, 0xbe, 0x3b, 0xaa, 0x4a, - 0xff, 0xf5, 0x6d, 0x35, 0xf6, 0xe4, 0xdb, 0x6a, 0xec, 0xeb, 0x6f, 0xab, 0xb1, 0x7f, 0x4a, 
0xf1, - 0xd0, 0xef, 0xa5, 0xd8, 0xf7, 0xac, 0x5b, 0x7f, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xb9, 0x47, - 0x05, 0xdc, 0x24, 0x00, 0x00, + // 3683 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3a, 0x4b, 0x6c, 0x1c, 0x47, + 0x76, 0x33, 0xd3, 0xf3, 0x7d, 0xf3, 0x65, 0x89, 0x94, 0xa9, 0xb1, 0x35, 0x94, 0xdb, 0x72, 0x2c, + 0x2b, 0x5e, 0xca, 0xa6, 0x65, 0x45, 0x2b, 0x6b, 0xe3, 0x1d, 0x92, 0x43, 0x72, 0x64, 0x92, 0xc3, + 0x14, 0x87, 0xbb, 0xd9, 0x64, 0xe1, 0x46, 0x73, 0xba, 0xd8, 0x6a, 0x6b, 0xa6, 0xbb, 0xdd, 0x1f, + 0x8b, 0x34, 0x10, 0x64, 0x73, 0xcb, 0x61, 0x0f, 0x39, 0xe4, 0x10, 0xe4, 0x94, 0x4b, 0x02, 0x23, + 0x40, 0x82, 0x05, 0x92, 0x63, 0x0e, 0x09, 0x12, 0x60, 0x73, 0x08, 0x16, 0xce, 0xe7, 0xb0, 0x27, + 0xc1, 0xa6, 0x2f, 0x3e, 0x05, 0xce, 0x2d, 0x87, 0x1c, 0x82, 0xfa, 0x74, 0x4f, 0xf7, 0x74, 0x0f, + 0x45, 0x3b, 0x41, 0x80, 0x1c, 0x6c, 0x4e, 0xbd, 0x5f, 0xbd, 0x5f, 0xbd, 0xf7, 0xaa, 0x5a, 0xf0, + 0x92, 0xeb, 0x8c, 0xee, 0x8c, 0x54, 0xc7, 0xb4, 0xbc, 0x3b, 0xf6, 0x58, 0x35, 0xed, 0x63, 0xf6, + 0x67, 0xd5, 0x76, 0x2c, 0xcf, 0x42, 0x2d, 0xfb, 0x74, 0x95, 0x23, 0x57, 0x39, 0xb2, 0xbd, 0xa8, + 0x5b, 0xba, 0xc5, 0x90, 0x77, 0xe8, 0x2f, 0x4e, 0xd7, 0xee, 0xe8, 0x96, 0xa5, 0x8f, 0xc9, 0x1d, + 0xb6, 0x3a, 0xf6, 0x4f, 0xee, 0x3c, 0x75, 0x54, 0xdb, 0x26, 0x8e, 0x2b, 0xf0, 0x2b, 0x74, 0x17, + 0xd5, 0x36, 0x38, 0xc1, 0x1d, 0xdf, 0x37, 0x34, 0xfb, 0x98, 0xfd, 0x11, 0x04, 0x37, 0x29, 0x81, + 0xfb, 0x58, 0x75, 0x88, 0x76, 0xc7, 0x3b, 0xb3, 0x89, 0xcb, 0xff, 0x6f, 0x1f, 0xf3, 0xbf, 0x9c, + 0x4a, 0xfe, 0xbd, 0x2c, 0x54, 0x0f, 0xc6, 0xaa, 0x39, 0xb0, 0x3d, 0xc3, 0x32, 0x5d, 0xb4, 0x0c, + 0x25, 0x72, 0x6a, 0x8f, 0x55, 0xc3, 0x5c, 0xce, 0xdd, 0xc8, 0xde, 0x2a, 0xe3, 0x60, 0x49, 0x31, + 0xaa, 0xa9, 0x8e, 0xcf, 0x3e, 0x21, 0xcb, 0x12, 0xc7, 0x88, 0x25, 0xba, 0x0f, 0xd7, 0x26, 0xea, + 0xa9, 0x62, 0xf9, 0x9e, 0xed, 0x7b, 0x8a, 0x63, 0x3d, 0x75, 0x15, 0x9b, 0x38, 0x8a, 0xa7, 0x1e, + 0x8f, 0xc9, 0x72, 0xfe, 0x46, 0xf6, 0x96, 0x84, 
0x97, 0x26, 0xea, 0xe9, 0x80, 0xe1, 0xb1, 0xf5, + 0xd4, 0x3d, 0x20, 0xce, 0x90, 0x22, 0x1f, 0xe5, 0xcb, 0xd9, 0x56, 0x4e, 0xfe, 0x42, 0x82, 0x3c, + 0xd5, 0x01, 0xbd, 0x06, 0x92, 0xa6, 0xea, 0xcb, 0xd9, 0x1b, 0xd9, 0x5b, 0xd5, 0xb5, 0xa5, 0xd5, + 0x59, 0x4f, 0xad, 0x6e, 0x76, 0xb7, 0x31, 0xa5, 0x40, 0x77, 0xa1, 0x60, 0x5a, 0x1a, 0x71, 0x97, + 0x73, 0x37, 0xa4, 0x5b, 0xd5, 0xb5, 0x4e, 0x92, 0x94, 0xca, 0xdb, 0x72, 0x54, 0x7d, 0x42, 0x4c, + 0x0f, 0x73, 0x62, 0xf4, 0x7d, 0xa8, 0x51, 0xac, 0x62, 0x71, 0x5b, 0x99, 0x6a, 0xd5, 0xb5, 0xeb, + 0xe9, 0xcc, 0xc2, 0x21, 0xb8, 0x6a, 0x47, 0xbc, 0x73, 0x08, 0xc8, 0x30, 0x47, 0xd6, 0xc4, 0x30, + 0x75, 0x45, 0xd5, 0x89, 0xe9, 0x29, 0x86, 0xe6, 0x2e, 0x17, 0x98, 0x12, 0x4d, 0x2a, 0x87, 0x87, + 0x61, 0xf5, 0xe8, 0xa8, 0xbf, 0xb9, 0xbe, 0x78, 0xfe, 0x6c, 0xa5, 0xd5, 0x17, 0xe4, 0x5d, 0x4a, + 0xdd, 0xdf, 0x74, 0x71, 0xcb, 0x88, 0x41, 0x34, 0x17, 0xf9, 0x70, 0x9d, 0x9c, 0x92, 0x91, 0x4f, + 0xb7, 0x50, 0x5c, 0x4f, 0xf5, 0x7c, 0x57, 0xd1, 0x88, 0xeb, 0x19, 0xa6, 0xca, 0xf5, 0x2c, 0x32, + 0xf9, 0x6f, 0xa5, 0xeb, 0xb9, 0xda, 0x0b, 0x78, 0x0f, 0x19, 0xeb, 0xe6, 0x94, 0x13, 0xbf, 0x48, + 0xe6, 0xe2, 0xdc, 0xf6, 0x09, 0xb4, 0xe7, 0xb3, 0xa2, 0x97, 0xa1, 0xa6, 0x3b, 0xf6, 0x48, 0x51, + 0x35, 0xcd, 0x21, 0xae, 0xcb, 0x62, 0x52, 0xc1, 0x55, 0x0a, 0xeb, 0x72, 0x10, 0x7a, 0x15, 0x1a, + 0xae, 0x3b, 0x56, 0x3c, 0xd5, 0xd1, 0x89, 0x67, 0xaa, 0x13, 0xc2, 0x32, 0xa6, 0x82, 0xeb, 0xae, + 0x3b, 0x1e, 0x86, 0xc0, 0x47, 0xf9, 0xb2, 0xd4, 0xca, 0xcb, 0x67, 0x50, 0x8b, 0x86, 0x04, 0x35, + 0x20, 0x67, 0x68, 0x4c, 0x6a, 0x1e, 0xe7, 0x0c, 0x2d, 0x08, 0x7d, 0xee, 0xb9, 0xa1, 0x7f, 0x33, + 0x08, 0xbd, 0xc4, 0xbc, 0xd2, 0x4e, 0xf7, 0xca, 0xbe, 0xa5, 0x11, 0x11, 0x76, 0xf9, 0x4f, 0xb3, + 0x20, 0x6d, 0x76, 0xb7, 0xd1, 0xdb, 0x01, 0x67, 0x96, 0x71, 0x5e, 0x4f, 0xdd, 0x84, 0xfe, 0x17, + 0x61, 0x6e, 0x1b, 0x50, 0x12, 0x90, 0x84, 0xca, 0xd4, 0x7e, 0xcb, 0xf1, 0x88, 0xa6, 0xd8, 0xaa, + 0x43, 0x4c, 0x8f, 0x26, 0x94, 0x74, 0x2b, 0x8f, 0xeb, 0x1c, 0x7a, 0xc0, 0x81, 0xe8, 
0x35, 0x68, + 0x0a, 0xb2, 0xd1, 0x63, 0x63, 0xac, 0x39, 0xc4, 0x64, 0xaa, 0xe7, 0xb1, 0xe0, 0xde, 0x10, 0x50, + 0x79, 0x0b, 0xca, 0x81, 0xea, 0x89, 0xbd, 0x6e, 0x43, 0xce, 0xb2, 0x85, 0x77, 0x52, 0x4c, 0x1e, + 0xd8, 0xc4, 0x51, 0x3d, 0xcb, 0xc1, 0x39, 0xcb, 0x96, 0xff, 0xbe, 0x02, 0xe5, 0x00, 0x80, 0x7e, + 0x0d, 0x4a, 0x96, 0xad, 0xd0, 0x13, 0xcf, 0xa4, 0x35, 0xd2, 0xce, 0x4a, 0x40, 0x3c, 0x3c, 0xb3, + 0x09, 0x2e, 0x5a, 0x36, 0xfd, 0x8b, 0x76, 0xa1, 0x3e, 0x21, 0x13, 0xc5, 0xb5, 0x7c, 0x67, 0x44, + 0x94, 0x70, 0xf3, 0x5f, 0x49, 0xb2, 0xef, 0x91, 0x89, 0xe5, 0x9c, 0x1d, 0x32, 0xc2, 0x40, 0xd4, + 0x4e, 0x06, 0x57, 0x27, 0x64, 0x12, 0x00, 0xd1, 0x3d, 0x28, 0x4e, 0x54, 0x9b, 0x8a, 0x91, 0xe6, + 0x1d, 0xba, 0x3d, 0xd5, 0x8e, 0x70, 0x17, 0x26, 0x74, 0x89, 0x1e, 0x42, 0x51, 0xd5, 0x75, 0xca, + 0xc7, 0x0f, 0xeb, 0x2b, 0x49, 0xbe, 0xae, 0xae, 0x3b, 0x44, 0x57, 0xbd, 0xe8, 0xde, 0x05, 0x55, + 0xd7, 0x07, 0x36, 0xda, 0x82, 0x2a, 0xb3, 0xc1, 0x30, 0x9f, 0x50, 0x11, 0x05, 0x26, 0xe2, 0xe6, + 0x5c, 0x0b, 0x0c, 0xf3, 0x49, 0x44, 0x46, 0x85, 0xea, 0xcf, 0x40, 0xe8, 0x3d, 0xa8, 0x9c, 0x18, + 0x63, 0x8f, 0x38, 0x54, 0x4a, 0x91, 0x49, 0xb9, 0x91, 0x94, 0xb2, 0xc5, 0x48, 0x22, 0x12, 0xca, + 0x27, 0x02, 0x82, 0x1e, 0x42, 0x79, 0x6c, 0x4c, 0x0c, 0x8f, 0xf2, 0x97, 0x18, 0xff, 0x4a, 0x92, + 0x7f, 0x97, 0x52, 0x44, 0xd8, 0x4b, 0x63, 0x0e, 0xa0, 0xdc, 0xbe, 0x49, 0x8b, 0x83, 0x65, 0x2f, + 0x97, 0xe7, 0x71, 0x1f, 0x51, 0x8a, 0x28, 0xb7, 0xcf, 0x01, 0xe8, 0x03, 0x68, 0xb0, 0x93, 0x3c, + 0x8d, 0x64, 0x65, 0x9e, 0x1f, 0xb6, 0xf1, 0xc1, 0x46, 0x3c, 0x8e, 0xeb, 0xad, 0xf3, 0x67, 0x2b, + 0xb5, 0x28, 0x7c, 0x27, 0x83, 0x59, 0x65, 0x08, 0x43, 0xfb, 0x43, 0x51, 0x29, 0x02, 0x2f, 0x7f, + 0xc5, 0x0d, 0x94, 0xe7, 0x88, 0x8f, 0x38, 0x79, 0xbd, 0x71, 0xfe, 0x6c, 0x05, 0xa6, 0xd0, 0x9d, + 0x0c, 0x06, 0x26, 0x9a, 0x7b, 0xfd, 0xbb, 0x50, 0xfa, 0xd0, 0x32, 0x98, 0xd5, 0x55, 0x26, 0x32, + 0x25, 0x75, 0x1f, 0x59, 0x46, 0xd4, 0xe8, 0xe2, 0x87, 0x6c, 0x8d, 0x76, 0xa1, 0xe1, 0x6b, 0xde, + 0x49, 0xc4, 0xe6, 0xda, 
0x3c, 0x9b, 0x8f, 0x36, 0x87, 0x5b, 0x89, 0xdc, 0xad, 0x51, 0xee, 0xd0, + 0xc2, 0x01, 0x34, 0xc9, 0xc4, 0xf6, 0xce, 0x22, 0xe2, 0xea, 0x4c, 0xdc, 0xab, 0x49, 0x71, 0x3d, + 0x4a, 0x98, 0x90, 0x57, 0x27, 0x51, 0x30, 0xfa, 0x31, 0xd4, 0x2c, 0x8f, 0x8c, 0x43, 0x97, 0x35, + 0x98, 0xb4, 0x5b, 0x29, 0x27, 0x73, 0x48, 0xc6, 0xbd, 0x53, 0xdb, 0x72, 0xbc, 0xa4, 0xdf, 0x28, + 0x6e, 0xea, 0x37, 0x2a, 0x4f, 0xf8, 0xed, 0x03, 0x58, 0x1c, 0x8d, 0x8d, 0xd1, 0x93, 0xc7, 0x96, + 0xef, 0x92, 0x88, 0xce, 0x4d, 0xb6, 0xcb, 0xed, 0xe4, 0x2e, 0x1b, 0x94, 0x7a, 0x87, 0x52, 0x27, + 0x14, 0x47, 0x53, 0x49, 0xa1, 0xf6, 0x1f, 0x00, 0x8a, 0xca, 0x17, 0x36, 0xb4, 0x98, 0xf4, 0xd5, + 0x8b, 0xa4, 0x27, 0x2d, 0xd9, 0xc9, 0xe0, 0x56, 0x64, 0x07, 0x86, 0x59, 0xcf, 0xd3, 0x5a, 0x27, + 0xff, 0x73, 0x0e, 0x16, 0xd3, 0x2a, 0x0b, 0x42, 0x90, 0x67, 0xcd, 0x86, 0x77, 0x24, 0xf6, 0x1b, + 0xad, 0x40, 0x75, 0x64, 0x8d, 0xfd, 0x89, 0xa9, 0x18, 0xda, 0x29, 0x9f, 0x0a, 0x24, 0x0c, 0x1c, + 0xd4, 0xd7, 0x4e, 0x5d, 0xda, 0xce, 0x04, 0x01, 0xa5, 0xe7, 0xcd, 0xa3, 0x82, 0x05, 0xd3, 0x3e, + 0x05, 0xa1, 0x77, 0x42, 0x12, 0x36, 0x1f, 0xb1, 0x62, 0xde, 0x58, 0x43, 0xd4, 0x20, 0x3e, 0x30, + 0x6d, 0xaa, 0x9e, 0xca, 0x4a, 0xa4, 0x60, 0xa3, 0xbf, 0x5d, 0xf4, 0x00, 0xc0, 0xf5, 0x54, 0xc7, + 0x53, 0x3c, 0x63, 0x42, 0x44, 0x89, 0x79, 0x71, 0x95, 0x0f, 0x6f, 0xab, 0xc1, 0xf0, 0xb6, 0xda, + 0x37, 0xbd, 0x7b, 0x77, 0x7f, 0xa0, 0x8e, 0x7d, 0x82, 0x2b, 0x8c, 0x7c, 0x68, 0x4c, 0xe8, 0xe0, + 0x54, 0x71, 0x3d, 0x5a, 0x9e, 0x29, 0x6b, 0xf1, 0xf9, 0xac, 0x65, 0x4a, 0xcd, 0x38, 0xaf, 0x42, + 0x91, 0x8d, 0x57, 0x1e, 0x2b, 0x27, 0x15, 0x2c, 0x56, 0xe8, 0x25, 0x2a, 0xd1, 0x21, 0x2a, 0x1d, + 0x30, 0x58, 0xad, 0x28, 0xe3, 0x29, 0x40, 0xfe, 0x45, 0x16, 0x50, 0xb2, 0xd6, 0xa5, 0x7a, 0x74, + 0xd6, 0x1b, 0xb9, 0xcb, 0x79, 0xe3, 0x12, 0x7e, 0x7e, 0x04, 0x4b, 0x82, 0xc4, 0x25, 0x13, 0xd5, + 0xf4, 0x8c, 0x51, 0xcc, 0xe1, 0x57, 0xa7, 0x5b, 0x1c, 0x0a, 0x3c, 0xdb, 0xe6, 0x0a, 0x67, 0x8a, + 0xc2, 0x5c, 0xd9, 0x04, 0x94, 0xac, 0x59, 0x09, 0xdd, 0xb3, 
0xdf, 0x4e, 0xf7, 0x5c, 0x42, 0x77, + 0xf9, 0x17, 0x79, 0x68, 0xcd, 0x56, 0x31, 0x36, 0x18, 0xc7, 0xa6, 0xa4, 0x60, 0x89, 0xee, 0xc7, + 0x4b, 0xaf, 0xa1, 0xb1, 0xee, 0x97, 0x9f, 0x2d, 0xaa, 0xfd, 0xcd, 0x78, 0x51, 0xed, 0x6b, 0xe8, + 0x10, 0x6a, 0x62, 0x9c, 0x9e, 0x4e, 0xd1, 0xa9, 0xa7, 0x6b, 0x56, 0x9b, 0x55, 0x4c, 0x5c, 0x7f, + 0xec, 0xb1, 0xf1, 0x9a, 0x36, 0x61, 0x2e, 0x85, 0x2d, 0x91, 0x0e, 0x68, 0x64, 0x99, 0x26, 0x19, + 0x79, 0xbc, 0x99, 0xf0, 0xe9, 0x92, 0xa7, 0xec, 0xfd, 0x4b, 0x88, 0xa6, 0x80, 0x8d, 0x50, 0x40, + 0x30, 0x20, 0x2f, 0x8c, 0x66, 0x41, 0xed, 0x7f, 0xc9, 0x42, 0x35, 0xa2, 0x07, 0xba, 0x0e, 0xc0, + 0xcc, 0x50, 0x22, 0x69, 0x56, 0x61, 0x90, 0xfd, 0xff, 0x37, 0xb9, 0xd6, 0xfe, 0x75, 0x58, 0x4a, + 0x75, 0x40, 0xca, 0x1c, 0x9c, 0x4d, 0x99, 0x83, 0xd7, 0xeb, 0x50, 0x8d, 0x4c, 0xf5, 0x8f, 0xf2, + 0xe5, 0x5c, 0x4b, 0x92, 0x3f, 0x86, 0x6a, 0x64, 0xee, 0x41, 0x9b, 0x50, 0x25, 0xa7, 0x36, 0xcd, + 0x1d, 0x16, 0x1a, 0x3e, 0xa8, 0xa6, 0x74, 0xd2, 0xc3, 0x91, 0x3a, 0x56, 0x9d, 0x5e, 0x48, 0x8a, + 0xa3, 0x6c, 0x97, 0x49, 0xe4, 0xbf, 0xcc, 0xc1, 0x42, 0x62, 0x70, 0x42, 0xdf, 0x83, 0xe2, 0xc7, + 0xb4, 0xd0, 0x04, 0x3b, 0xbf, 0x7a, 0xc1, 0xb4, 0x15, 0xd9, 0x5c, 0x30, 0xa1, 0x37, 0xa1, 0xa8, + 0x3b, 0x96, 0x6f, 0x07, 0xd7, 0xb2, 0xe5, 0x94, 0x66, 0xc0, 0x74, 0xc0, 0x82, 0x8e, 0xd6, 0x6d, + 0xf6, 0x2b, 0x16, 0x41, 0x60, 0x20, 0x1e, 0xc0, 0x15, 0xa8, 0x32, 0xe1, 0x82, 0x20, 0xcf, 0x09, + 0x18, 0x88, 0x13, 0xb4, 0xa1, 0xfc, 0xd4, 0x30, 0x35, 0xeb, 0x29, 0xd1, 0x58, 0x26, 0x97, 0x71, + 0xb8, 0xa6, 0xcc, 0xb6, 0xea, 0x78, 0x86, 0x3a, 0x56, 0x54, 0x5d, 0x67, 0x05, 0xb6, 0x8c, 0x41, + 0x80, 0xba, 0xba, 0x8e, 0x5e, 0x87, 0xd6, 0x89, 0x61, 0xaa, 0x63, 0xe3, 0x13, 0xa2, 0x38, 0x2c, + 0x5f, 0x5d, 0x56, 0x4f, 0xcb, 0xb8, 0x19, 0xc0, 0x79, 0x1a, 0xbb, 0xf2, 0xef, 0x67, 0xa1, 0x11, + 0x1f, 0xf0, 0xd0, 0x3a, 0xc0, 0xd4, 0xeb, 0xe2, 0xd2, 0x7a, 0x99, 0x58, 0x45, 0xb8, 0xd0, 0x1a, + 0x94, 0x78, 0x58, 0x9e, 0xef, 0xb3, 0x80, 0x50, 0xfe, 0x49, 0x16, 0xea, 0xb1, 0x59, 0x11, 0x2d, + 
0x42, 0x81, 0xcd, 0x8a, 0x4c, 0x09, 0x09, 0xf3, 0xc5, 0xb7, 0x91, 0x4d, 0x73, 0x59, 0x3d, 0xb6, + 0x1c, 0x7e, 0x5a, 0x5d, 0x67, 0xe4, 0x8a, 0xbb, 0x4a, 0x3d, 0x84, 0x1e, 0x3a, 0x23, 0x57, 0xfe, + 0x2a, 0x0b, 0xf5, 0xd8, 0xc0, 0x99, 0xc8, 0xb9, 0x6c, 0xf2, 0x30, 0xfe, 0x00, 0x9a, 0x82, 0x64, + 0xa2, 0xda, 0xb6, 0x61, 0xea, 0x81, 0x5e, 0xdf, 0x79, 0xce, 0x34, 0x2b, 0xb4, 0xdc, 0xe3, 0x5c, + 0xb8, 0x31, 0x8a, 0x2e, 0x5d, 0x74, 0x13, 0x1a, 0xe1, 0x9b, 0xc3, 0xb1, 0xea, 0x8d, 0x1e, 0xf3, + 0x2a, 0x8b, 0x6b, 0x0e, 0x7f, 0x6a, 0x58, 0xa7, 0xb0, 0xf6, 0x3d, 0xa8, 0xc7, 0xc4, 0x50, 0x53, + 0x83, 0x99, 0xc1, 0xd4, 0xc8, 0xa9, 0xd0, 0x59, 0xc2, 0x75, 0x31, 0x36, 0x70, 0xa0, 0xfc, 0xf3, + 0x3c, 0xd4, 0xa2, 0x53, 0x26, 0x7a, 0x17, 0xf2, 0x91, 0xeb, 0xd4, 0x6b, 0x17, 0xcf, 0xa4, 0x6c, + 0xc1, 0x6a, 0x0a, 0x63, 0x42, 0x2a, 0x5c, 0x21, 0x1f, 0xf9, 0xea, 0xd8, 0xf0, 0xce, 0x94, 0x91, + 0x65, 0x6a, 0x06, 0xaf, 0xc1, 0xdc, 0x0f, 0x6f, 0x3e, 0x47, 0x56, 0x4f, 0x70, 0x6e, 0x04, 0x8c, + 0x18, 0x91, 0x59, 0x90, 0x8b, 0x30, 0x34, 0x44, 0xeb, 0x08, 0xa2, 0xcf, 0x6f, 0xca, 0xbf, 0xfa, + 0x1c, 0xe9, 0xfc, 0xbe, 0x2a, 0x12, 0xa2, 0xce, 0x45, 0x6c, 0x88, 0xb4, 0x98, 0x8d, 0x6e, 0x3e, + 0x19, 0xdd, 0x64, 0x14, 0x0a, 0x29, 0x51, 0x98, 0xc0, 0x42, 0xc2, 0x0a, 0x74, 0x1b, 0x16, 0xc6, + 0xe4, 0x24, 0xd0, 0x97, 0x87, 0x43, 0xdc, 0x7d, 0x9b, 0x14, 0xb1, 0x31, 0x0d, 0x08, 0x7a, 0x03, + 0x90, 0x63, 0xe8, 0x8f, 0x67, 0x88, 0x73, 0x8c, 0xb8, 0xc5, 0x30, 0x11, 0xea, 0xf6, 0x10, 0x6a, + 0x51, 0xb3, 0xa8, 0x1d, 0xfc, 0xae, 0x1e, 0xdb, 0xa4, 0xca, 0x61, 0x7c, 0x83, 0xa9, 0xa9, 0x51, + 0xd1, 0xd5, 0x48, 0x52, 0xc8, 0xef, 0x40, 0x39, 0x08, 0x2b, 0xaa, 0x40, 0xa1, 0xbf, 0xbf, 0xdf, + 0xc3, 0xad, 0x0c, 0x6a, 0x00, 0xec, 0xf6, 0xb6, 0x86, 0xca, 0xe0, 0x68, 0xd8, 0xc3, 0xad, 0x2c, + 0x5d, 0x6f, 0x1d, 0xed, 0xee, 0x8a, 0xb5, 0x24, 0x9f, 0x00, 0x4a, 0x5e, 0x36, 0x52, 0x87, 0xaf, + 0x87, 0x00, 0xaa, 0xa3, 0x2b, 0xa2, 0x16, 0xe7, 0xe6, 0x3d, 0x57, 0xf0, 0xca, 0x22, 0xa6, 0x4a, + 0xd5, 0xd1, 0xd9, 0x2f, 0x57, 0xb6, 
0xe0, 0x4a, 0xca, 0x2d, 0xe4, 0x32, 0x27, 0xf4, 0xdb, 0x35, + 0x62, 0xf9, 0x5f, 0x25, 0x58, 0x9e, 0x77, 0x87, 0xa0, 0xf6, 0x3d, 0xb6, 0x5c, 0x2f, 0xb0, 0x8f, + 0xfe, 0xa6, 0x30, 0x7a, 0x13, 0x60, 0xbe, 0x2d, 0x60, 0xf6, 0x9b, 0x16, 0x72, 0xdf, 0x25, 0x0e, + 0xf3, 0x85, 0xc4, 0x68, 0xc3, 0x35, 0xc5, 0xd9, 0xaa, 0xeb, 0x3e, 0xb5, 0x1c, 0x8d, 0x4d, 0x42, + 0x15, 0x1c, 0xae, 0x29, 0x4e, 0x53, 0x3d, 0xf5, 0x58, 0x75, 0xf9, 0xf4, 0x5d, 0xc1, 0xe1, 0x9a, + 0xd6, 0xc5, 0x8f, 0x7c, 0xe2, 0x9c, 0xb1, 0xd2, 0x5f, 0xc1, 0x7c, 0x91, 0x70, 0x44, 0xe9, 0xf9, + 0x8e, 0x28, 0x5f, 0x6e, 0x22, 0xb9, 0x0e, 0xc0, 0x52, 0x5f, 0x71, 0x8d, 0x4f, 0x08, 0xbb, 0x66, + 0x17, 0x70, 0x85, 0x41, 0x0e, 0x8d, 0x4f, 0x48, 0x7c, 0x38, 0x87, 0x99, 0xe1, 0x9c, 0x36, 0x23, + 0x7a, 0x0f, 0x70, 0x3d, 0x75, 0x62, 0x8b, 0xec, 0x66, 0xf7, 0xde, 0x0a, 0x6e, 0x86, 0x70, 0x91, + 0xc6, 0xaf, 0x43, 0x8b, 0x75, 0x31, 0x36, 0xc7, 0x09, 0xd2, 0x1a, 0x27, 0x0d, 0xe1, 0x82, 0xf4, + 0x7a, 0xec, 0x7a, 0x52, 0x67, 0xfd, 0x21, 0x72, 0x03, 0xb9, 0x06, 0x65, 0x62, 0x6a, 0x1c, 0xd9, + 0x60, 0xc8, 0x12, 0x31, 0x35, 0x8a, 0x92, 0xff, 0x22, 0x07, 0x25, 0x7a, 0xc7, 0xdc, 0xb5, 0x74, + 0xf4, 0x1e, 0x80, 0xea, 0x79, 0x8e, 0x71, 0xec, 0x7b, 0xe1, 0x70, 0xb0, 0x92, 0x7e, 0x5d, 0xed, + 0x06, 0x74, 0x38, 0xc2, 0x42, 0x8f, 0x38, 0xdd, 0x23, 0x79, 0x6a, 0x25, 0x6e, 0x5d, 0xf4, 0x88, + 0xbf, 0x0b, 0x6d, 0xeb, 0xd8, 0x25, 0xce, 0xc7, 0x84, 0x2b, 0x16, 0x67, 0x92, 0x18, 0xd3, 0x0b, + 0x01, 0xc5, 0x70, 0x86, 0xf9, 0x35, 0x68, 0xba, 0xe4, 0x63, 0xe2, 0xd0, 0x02, 0x6b, 0xfa, 0x93, + 0x63, 0xe2, 0x88, 0x17, 0xe8, 0x46, 0x00, 0xde, 0x67, 0x50, 0xf4, 0x0a, 0xd4, 0x43, 0x42, 0x8f, + 0x9c, 0x7a, 0x22, 0x79, 0x6a, 0x01, 0x70, 0x48, 0x4e, 0x3d, 0xaa, 0xf6, 0xb1, 0xa5, 0x9d, 0xc5, + 0x35, 0x28, 0x72, 0xb5, 0x29, 0x22, 0xb2, 0xb3, 0xfc, 0xd3, 0x3c, 0x94, 0xd9, 0x9d, 0xdc, 0x56, + 0x69, 0xa1, 0xa9, 0xd2, 0xe4, 0x52, 0x5c, 0xcf, 0xa1, 0xc1, 0x66, 0xc9, 0x4f, 0xaf, 0xe9, 0x14, + 0x78, 0xc8, 0x60, 0xe8, 0x0d, 0x58, 0x60, 0x24, 0x49, 0x97, 0xec, 0x64, 
0x70, 0x93, 0xa2, 0xa2, + 0x76, 0xc5, 0x23, 0x20, 0x7d, 0xf3, 0x08, 0x6c, 0xc2, 0x92, 0xe7, 0xa8, 0xec, 0x16, 0x12, 0xdf, + 0x92, 0xb9, 0x67, 0x7d, 0xe1, 0xfc, 0xd9, 0x4a, 0x7d, 0x48, 0x09, 0xfa, 0x9b, 0xa2, 0x07, 0x20, + 0x46, 0xdf, 0xd7, 0xa2, 0x6a, 0x74, 0x61, 0xd1, 0xb5, 0x55, 0x33, 0x21, 0xa4, 0xc0, 0x84, 0xb0, + 0x7b, 0x0d, 0xb5, 0x3f, 0x94, 0xb1, 0x40, 0xa9, 0xe3, 0x22, 0x86, 0xf0, 0xa2, 0xa8, 0xc1, 0xa9, + 0x92, 0x98, 0x77, 0xd7, 0xaf, 0x9e, 0x3f, 0x5b, 0x41, 0xbc, 0x74, 0xc7, 0xe4, 0xbd, 0x60, 0x4f, + 0x61, 0x31, 0xa9, 0xef, 0xc0, 0x0b, 0xd3, 0x3c, 0x8f, 0x4b, 0x2c, 0xb1, 0x78, 0x2d, 0x86, 0x49, + 0x1f, 0x65, 0x7b, 0x0b, 0x96, 0x82, 0xfc, 0x8f, 0x33, 0x95, 0x19, 0x13, 0x12, 0x87, 0x21, 0xca, + 0x72, 0x1d, 0xe0, 0x89, 0x61, 0x6a, 0xbc, 0x3a, 0xb3, 0x43, 0x2e, 0xe1, 0x0a, 0x85, 0xb0, 0xf2, + 0xbb, 0x5e, 0xe4, 0xf5, 0x5c, 0xfe, 0x1d, 0x68, 0xd2, 0x60, 0xec, 0x11, 0xcf, 0x31, 0x46, 0xdb, + 0xaa, 0xaf, 0x13, 0xb4, 0x0a, 0xe8, 0x64, 0x6c, 0xa9, 0x29, 0x8d, 0x8e, 0x86, 0xbc, 0xc5, 0x70, + 0xd1, 0x9d, 0x6e, 0x43, 0xcb, 0x30, 0xbd, 0xf4, 0x04, 0x69, 0x18, 0x66, 0x94, 0x76, 0xbd, 0x01, + 0x35, 0x3e, 0x28, 0x73, 0x6a, 0xf9, 0xcf, 0x72, 0xb0, 0x30, 0xdd, 0xff, 0xd0, 0x9f, 0x4c, 0x54, + 0xe7, 0x8c, 0x76, 0xcf, 0x91, 0xe5, 0x9b, 0x69, 0x1a, 0xe0, 0x16, 0xc3, 0x44, 0xf7, 0xbf, 0x05, + 0x2d, 0xd7, 0x9f, 0xa4, 0x9d, 0xd9, 0x86, 0xeb, 0x4f, 0xa2, 0x94, 0x3f, 0x86, 0xe6, 0x47, 0x3e, + 0xbd, 0x2b, 0x8d, 0x49, 0xd0, 0xb5, 0x78, 0x8a, 0xbe, 0x9d, 0x9e, 0xa2, 0x31, 0xad, 0x56, 0x99, + 0xe3, 0xba, 0xde, 0x6f, 0x08, 0x09, 0xb8, 0x11, 0xc8, 0xe2, 0x0d, 0xad, 0xfd, 0xdb, 0xd0, 0x9c, + 0x21, 0xa1, 0x55, 0x3f, 0x20, 0x62, 0xea, 0x67, 0x71, 0xb8, 0xa6, 0x46, 0x46, 0x5d, 0x11, 0x53, + 0xbc, 0xc5, 0x30, 0xd1, 0x63, 0xfb, 0xb3, 0x1c, 0xd4, 0x63, 0xa7, 0x26, 0xb5, 0x23, 0x7f, 0x1f, + 0x8a, 0xa2, 0xce, 0xce, 0x7d, 0x06, 0x8f, 0x09, 0x11, 0x23, 0xeb, 0x4e, 0x06, 0x0b, 0x3e, 0xf4, + 0x0a, 0xd4, 0x78, 0x31, 0x10, 0x89, 0x23, 0x89, 0x92, 0x50, 0xe5, 0x50, 0x66, 0x60, 0xfb, 0x8f, + 0xb3, 0x50, 
0x14, 0x85, 0xfb, 0xed, 0xf0, 0x49, 0x2b, 0x32, 0x6d, 0xa6, 0x75, 0x20, 0x98, 0x76, + 0xa0, 0xd4, 0xe1, 0x45, 0x8a, 0x0d, 0x2f, 0xe8, 0x3e, 0x5c, 0x1b, 0xa9, 0xa6, 0x72, 0x4c, 0x94, + 0x0f, 0x5d, 0xcb, 0x54, 0x88, 0x39, 0xb2, 0x34, 0xa2, 0x29, 0xaa, 0xe3, 0xa8, 0x67, 0xe2, 0xc3, + 0xde, 0xd2, 0x48, 0x35, 0xd7, 0xc9, 0x23, 0xd7, 0x32, 0x7b, 0x1c, 0xdb, 0xa5, 0xc8, 0xf5, 0x12, + 0x14, 0x98, 0xea, 0xf2, 0xcf, 0x73, 0x00, 0xd3, 0x28, 0xa6, 0xfa, 0xeb, 0x06, 0xbb, 0xec, 0x8e, + 0x1c, 0x83, 0xdd, 0x91, 0xc5, 0x87, 0xa1, 0x28, 0x88, 0x72, 0xf9, 0xa6, 0xe1, 0x89, 0x5e, 0xcf, + 0x7e, 0xcf, 0x14, 0xb9, 0xfc, 0xff, 0x52, 0x9b, 0x29, 0xa4, 0xb7, 0x99, 0xef, 0x42, 0x41, 0xa7, + 0xc7, 0x72, 0x99, 0xb0, 0x88, 0xbe, 0x7c, 0x51, 0xa6, 0xb2, 0xf3, 0xbb, 0x93, 0xc1, 0x9c, 0x03, + 0xbd, 0x07, 0x25, 0x97, 0xe7, 0xee, 0xf2, 0xc9, 0xbc, 0xcf, 0x12, 0x89, 0x34, 0xdf, 0xc9, 0xe0, + 0x80, 0x8b, 0x16, 0x09, 0x3a, 0xa4, 0xc8, 0xff, 0x9e, 0x05, 0xc4, 0xde, 0x78, 0x4d, 0xcd, 0xb6, + 0xd8, 0x89, 0x36, 0x4f, 0x0c, 0x1d, 0x5d, 0x03, 0xc9, 0x77, 0xc6, 0xdc, 0xa1, 0xeb, 0xa5, 0xf3, + 0x67, 0x2b, 0xd2, 0x11, 0xde, 0xc5, 0x14, 0x86, 0xde, 0x87, 0xd2, 0x63, 0xa2, 0x6a, 0xc4, 0x09, + 0xe6, 0xc2, 0xb7, 0xe6, 0xbc, 0x1a, 0xc7, 0x24, 0xae, 0xee, 0x70, 0x9e, 0x9e, 0xe9, 0x39, 0x67, + 0x38, 0x90, 0x40, 0x4f, 0x91, 0x61, 0xba, 0x64, 0xe4, 0x3b, 0xc1, 0x37, 0xdd, 0x70, 0x8d, 0x96, + 0xa1, 0x44, 0x3d, 0x66, 0xf9, 0x9e, 0x68, 0xa0, 0xc1, 0xb2, 0xfd, 0x00, 0x6a, 0x51, 0x71, 0xa8, + 0x05, 0xd2, 0x13, 0x72, 0x26, 0xc2, 0x4f, 0x7f, 0xd2, 0xb9, 0x8b, 0x27, 0x39, 0x8f, 0x3b, 0x5f, + 0x3c, 0xc8, 0xdd, 0xcf, 0xca, 0x7f, 0x9e, 0x85, 0xd6, 0x74, 0x54, 0x14, 0xe6, 0xb6, 0xa1, 0x4c, + 0xc7, 0xc2, 0x48, 0x12, 0x85, 0xeb, 0x70, 0x7c, 0xcc, 0xa5, 0x8c, 0x8f, 0xd2, 0x9c, 0xf1, 0x31, + 0x7f, 0xc1, 0xf8, 0x58, 0xb8, 0x60, 0x7c, 0x2c, 0xc6, 0xc7, 0x47, 0x79, 0x00, 0x35, 0xea, 0x4a, + 0x4c, 0xf8, 0xfb, 0xdd, 0xff, 0x78, 0x0a, 0x92, 0xff, 0x26, 0x07, 0x57, 0xd3, 0x9f, 0xf4, 0xd1, + 0x1e, 0x34, 0x89, 0x08, 0x19, 0xbd, 0x18, 0x9e, 
0x18, 0xc1, 0x67, 0xf0, 0x9b, 0x97, 0x89, 0x2f, + 0x6e, 0x90, 0x78, 0x06, 0x3d, 0x80, 0xb2, 0x23, 0xd4, 0x16, 0x15, 0xab, 0x93, 0x2e, 0x27, 0x30, + 0x0e, 0x87, 0xf4, 0xe8, 0x1e, 0x94, 0x26, 0x2c, 0x71, 0x83, 0x22, 0xfe, 0xd2, 0x45, 0xd9, 0x8d, + 0x03, 0x62, 0xf4, 0x26, 0x14, 0x68, 0x47, 0x0f, 0x0e, 0x6e, 0x3b, 0x9d, 0x8b, 0xb6, 0x6e, 0xcc, + 0x09, 0xd1, 0x77, 0x20, 0x3f, 0xb6, 0xf4, 0xe0, 0x03, 0xfa, 0xb5, 0x74, 0x86, 0x5d, 0x4b, 0xc7, + 0x8c, 0x4c, 0xfe, 0x13, 0x09, 0x5e, 0xba, 0xe8, 0x6b, 0x02, 0x1a, 0xc0, 0x42, 0xe4, 0xcb, 0x44, + 0xcc, 0x8d, 0xf2, 0x45, 0x1f, 0x26, 0x84, 0x13, 0x23, 0x9f, 0x22, 0x84, 0x1b, 0xe3, 0x0f, 0x97, + 0xb9, 0xd9, 0x87, 0x4b, 0x92, 0x7c, 0xd1, 0xe0, 0x1e, 0x7b, 0xf8, 0xcd, 0x3e, 0x83, 0x5c, 0xfc, + 0xc0, 0xd1, 0xfe, 0x34, 0x3b, 0xfb, 0x76, 0xf1, 0x06, 0x20, 0xc3, 0x9c, 0x5e, 0xf1, 0x23, 0x7d, + 0xbc, 0x80, 0x5b, 0x0c, 0x13, 0xad, 0x74, 0x77, 0xe1, 0x6a, 0xcc, 0x2d, 0xe1, 0xdd, 0x47, 0x58, + 0xb4, 0x18, 0xb5, 0x3b, 0xb8, 0x04, 0xcd, 0x36, 0x20, 0xe9, 0x32, 0x0d, 0x48, 0xfe, 0xdb, 0x2c, + 0xb4, 0x66, 0x1f, 0xbc, 0xd0, 0xbb, 0x50, 0x1e, 0x59, 0xa6, 0xeb, 0xa9, 0xa6, 0x27, 0xa2, 0x71, + 0xf1, 0x65, 0x76, 0x27, 0x83, 0x43, 0x06, 0xb4, 0x36, 0xd3, 0x79, 0xe7, 0x3e, 0x62, 0x45, 0x7a, + 0xed, 0x1a, 0xe4, 0x4f, 0x7c, 0x73, 0x24, 0xbe, 0x35, 0xbf, 0x34, 0x6f, 0xb3, 0x2d, 0xdf, 0x1c, + 0xed, 0x64, 0x30, 0xa3, 0x9d, 0x76, 0xb7, 0xbf, 0xcb, 0x41, 0x35, 0xa2, 0x0c, 0xba, 0x03, 0x15, + 0x5a, 0x11, 0x9e, 0xd7, 0x86, 0x59, 0xd9, 0x60, 0x4d, 0x78, 0x05, 0xe0, 0xd8, 0xb2, 0xc6, 0xca, + 0xb4, 0x04, 0x96, 0x77, 0x32, 0xb8, 0x42, 0x61, 0x5c, 0xe2, 0xcb, 0x50, 0x35, 0x4c, 0xef, 0xde, + 0xdd, 0xc8, 0x24, 0x40, 0x47, 0x3a, 0x30, 0xc2, 0x2f, 0x3d, 0xe8, 0x55, 0xa8, 0xb3, 0x71, 0x30, + 0x24, 0xa2, 0x35, 0x2d, 0xbb, 0x93, 0xc1, 0x35, 0x01, 0xe6, 0x64, 0xb3, 0x43, 0x45, 0x21, 0x65, + 0xa8, 0x40, 0xb7, 0x80, 0xf5, 0xbe, 0x7b, 0x77, 0x15, 0xd3, 0x15, 0x74, 0x45, 0xb1, 0x65, 0x9d, + 0x23, 0xf6, 0x5d, 0x4e, 0x79, 0x1f, 0xea, 0xbe, 0x61, 0x7a, 0x6f, 0xad, 0xdd, 0x17, 
0x74, 0xfc, + 0x53, 0xee, 0xc2, 0xd4, 0xdc, 0xa3, 0x3e, 0x43, 0xb3, 0x4f, 0xa4, 0x9c, 0x92, 0x4f, 0xbd, 0x81, + 0xf7, 0x1e, 0xe5, 0xcb, 0xe5, 0x56, 0x45, 0xfe, 0x3c, 0x0b, 0x30, 0xf5, 0x71, 0xea, 0x84, 0xf0, + 0x00, 0x2a, 0x86, 0x69, 0x78, 0x8a, 0xea, 0xe8, 0x97, 0x7c, 0xe2, 0x28, 0x53, 0xfa, 0xae, 0xa3, + 0xbb, 0xe8, 0x1e, 0xe4, 0x19, 0x9b, 0x74, 0xe9, 0xf7, 0x71, 0x46, 0x2f, 0xfe, 0x55, 0x05, 0x6f, + 0x67, 0x39, 0x43, 0x43, 0x0f, 0xa0, 0x49, 0xe1, 0x4a, 0x18, 0x5f, 0x5e, 0x8a, 0xd2, 0x03, 0x5c, + 0xa7, 0xa4, 0xc1, 0xca, 0x95, 0xff, 0x23, 0x07, 0x57, 0x52, 0x1e, 0xc3, 0x43, 0x5b, 0xa5, 0x79, + 0xb6, 0xe6, 0xbf, 0x99, 0xad, 0xdf, 0x13, 0xb6, 0xf2, 0x1a, 0xf9, 0xfa, 0xa5, 0x5e, 0xe4, 0x57, + 0xbb, 0x8e, 0x1e, 0x33, 0xb9, 0x78, 0x91, 0xc9, 0xa5, 0x4b, 0x9a, 0xdc, 0xfe, 0x5d, 0x90, 0xba, + 0x8e, 0xfe, 0x7f, 0x7e, 0x9c, 0xa7, 0x47, 0x73, 0x2d, 0x9c, 0x8e, 0xa9, 0x97, 0x2d, 0x8d, 0x88, + 0x07, 0x3c, 0xf6, 0x9b, 0x4e, 0x1d, 0xd1, 0x27, 0x3b, 0xbe, 0xb8, 0xfd, 0x57, 0x12, 0xd4, 0xa2, + 0xff, 0xc0, 0x05, 0x5d, 0x83, 0xa5, 0xc1, 0x41, 0x0f, 0x77, 0x87, 0x03, 0xac, 0x0c, 0x7f, 0x74, + 0xd0, 0x53, 0x8e, 0xf6, 0xdf, 0xdf, 0x1f, 0xfc, 0x70, 0xbf, 0x95, 0x41, 0x2f, 0xc2, 0xd5, 0xbd, + 0xde, 0xde, 0x00, 0xff, 0x48, 0x39, 0x1c, 0x1c, 0xe1, 0x8d, 0x9e, 0x12, 0x10, 0xb6, 0xbe, 0x2a, + 0xa1, 0x6b, 0xb0, 0xb8, 0x8d, 0x0f, 0x36, 0x12, 0xa8, 0x7f, 0x2a, 0x53, 0xd4, 0xd1, 0xe6, 0x70, + 0x2b, 0x81, 0xfa, 0x59, 0x05, 0xb5, 0x61, 0xa9, 0xb7, 0x77, 0x30, 0x4c, 0x4a, 0xfc, 0x43, 0x40, + 0x2b, 0xd0, 0xde, 0xd8, 0xed, 0x6f, 0xbc, 0xbf, 0x33, 0x38, 0x3a, 0xec, 0x25, 0x08, 0xfe, 0x13, + 0xd0, 0x02, 0xd4, 0xf6, 0xba, 0x07, 0x53, 0xd0, 0x67, 0x4d, 0xf4, 0x02, 0xa0, 0xee, 0xf6, 0x36, + 0xee, 0x6d, 0x77, 0x87, 0x11, 0xda, 0xbf, 0x6e, 0xa1, 0x45, 0x68, 0x6e, 0xf5, 0x77, 0x87, 0x3d, + 0x3c, 0x85, 0xfe, 0xd1, 0x02, 0xba, 0x02, 0x8d, 0xdd, 0xfe, 0x5e, 0x7f, 0x38, 0x05, 0xfe, 0x17, + 0x03, 0x1e, 0xed, 0xf7, 0x07, 0xfb, 0x53, 0xe0, 0xe7, 0x08, 0x21, 0xa8, 0x3f, 0x1a, 0xf4, 0x23, + 0xb0, 0x7f, 0xb8, 0x42, 
0xed, 0x0a, 0xfc, 0xd1, 0xdf, 0x7f, 0x7f, 0x8a, 0xfa, 0x74, 0x8b, 0xea, + 0xc1, 0xbd, 0x11, 0x43, 0xfc, 0x74, 0x1b, 0x75, 0xe0, 0xda, 0x60, 0xd8, 0xdb, 0x55, 0x7a, 0xbf, + 0x79, 0x30, 0xc0, 0xc3, 0x19, 0xfc, 0xd7, 0xdb, 0xe8, 0x26, 0xac, 0x44, 0x8c, 0x4e, 0xa5, 0xfa, + 0xb7, 0x9d, 0xf5, 0x87, 0x9f, 0x7d, 0xd1, 0xc9, 0xfc, 0xf2, 0x8b, 0x4e, 0xe6, 0xeb, 0x2f, 0x3a, + 0xd9, 0x9f, 0x9c, 0x77, 0xb2, 0x9f, 0x9e, 0x77, 0xb2, 0xff, 0x78, 0xde, 0xc9, 0x7e, 0x76, 0xde, + 0xc9, 0x7e, 0x7e, 0xde, 0xc9, 0x7e, 0x75, 0xde, 0xc9, 0x7c, 0x7d, 0xde, 0xc9, 0xfe, 0xc1, 0x97, + 0x9d, 0xcc, 0x67, 0x5f, 0x76, 0x32, 0xbf, 0xfc, 0xb2, 0x93, 0xf9, 0xad, 0x22, 0xcf, 0xa0, 0xe3, + 0x22, 0xfb, 0x78, 0xfe, 0xf6, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x01, 0x0f, 0xea, 0x8a, + 0x29, 0x00, 0x00, } func (x OperatorType) String() string { @@ -3783,14 +4182,6 @@ func (this *Operator) Equal(that interface{}) bool { } else if !this.Op.Equal(that1.Op) { return false } - if len(this.Context) != len(that1.Context) { - return false - } - for i := range this.Context { - if this.Context[i] != that1.Context[i] { - return false - } - } return true } func (this *Operator_MemSourceOp) Equal(that interface{}) bool { @@ -4105,6 +4496,54 @@ func (this *Operator_OTelSinkOp) Equal(that interface{}) bool { } return true } +func (this *Operator_ClickhouseSourceOp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Operator_ClickhouseSourceOp) + if !ok { + that2, ok := that.(Operator_ClickhouseSourceOp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ClickhouseSourceOp.Equal(that1.ClickhouseSourceOp) { + return false + } + return true +} +func (this *Operator_ClickhouseSinkOp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Operator_ClickhouseSinkOp) + if !ok { + that2, ok := that.(Operator_ClickhouseSinkOp) + 
if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ClickhouseSinkOp.Equal(that1.ClickhouseSinkOp) { + return false + } + return true +} func (this *MemorySourceOperator) Equal(that interface{}) bool { if that == nil { return this == nil @@ -4820,6 +5259,79 @@ func (this *EmptySourceOperator) Equal(that interface{}) bool { } return true } +func (this *ClickHouseSourceOperator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClickHouseSourceOperator) + if !ok { + that2, ok := that.(ClickHouseSourceOperator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Host != that1.Host { + return false + } + if this.Port != that1.Port { + return false + } + if this.Username != that1.Username { + return false + } + if this.Password != that1.Password { + return false + } + if this.Database != that1.Database { + return false + } + if this.Query != that1.Query { + return false + } + if len(this.ColumnNames) != len(that1.ColumnNames) { + return false + } + for i := range this.ColumnNames { + if this.ColumnNames[i] != that1.ColumnNames[i] { + return false + } + } + if len(this.ColumnTypes) != len(that1.ColumnTypes) { + return false + } + for i := range this.ColumnTypes { + if this.ColumnTypes[i] != that1.ColumnTypes[i] { + return false + } + } + if this.BatchSize != that1.BatchSize { + return false + } + if this.Streaming != that1.Streaming { + return false + } + if this.TimestampColumn != that1.TimestampColumn { + return false + } + if this.PartitionColumn != that1.PartitionColumn { + return false + } + if this.StartTime != that1.StartTime { + return false + } + if this.EndTime != that1.EndTime { + return false + } + return true +} func (this *OTelLog) Equal(that interface{}) bool { if that == nil { return this == nil @@ 
-5355,6 +5867,45 @@ func (this *OTelEndpointConfig) Equal(that interface{}) bool { } return true } +func (this *ClickHouseConfig) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClickHouseConfig) + if !ok { + that2, ok := that.(ClickHouseConfig) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Hostname != that1.Hostname { + return false + } + if this.Host != that1.Host { + return false + } + if this.Port != that1.Port { + return false + } + if this.Username != that1.Username { + return false + } + if this.Password != that1.Password { + return false + } + if this.Database != that1.Database { + return false + } + return true +} func (this *OTelResource) Equal(that interface{}) bool { if that == nil { return this == nil @@ -5435,6 +5986,71 @@ func (this *OTelExportSinkOperator) Equal(that interface{}) bool { } return true } +func (this *ClickHouseExportSinkOperator) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClickHouseExportSinkOperator) + if !ok { + that2, ok := that.(ClickHouseExportSinkOperator) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ClickhouseConfig.Equal(that1.ClickhouseConfig) { + return false + } + if this.TableName != that1.TableName { + return false + } + if len(this.ColumnMappings) != len(that1.ColumnMappings) { + return false + } + for i := range this.ColumnMappings { + if !this.ColumnMappings[i].Equal(that1.ColumnMappings[i]) { + return false + } + } + return true +} +func (this *ClickHouseExportSinkOperator_ColumnMapping) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ClickHouseExportSinkOperator_ColumnMapping) + if !ok { + that2, ok := 
that.(ClickHouseExportSinkOperator_ColumnMapping) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.InputColumnIndex != that1.InputColumnIndex { + return false + } + if this.ClickhouseColumnName != that1.ClickhouseColumnName { + return false + } + if this.ColumnType != that1.ColumnType { + return false + } + return true +} func (this *ScalarExpression) Equal(that interface{}) bool { if that == nil { return this == nil @@ -6025,25 +6641,12 @@ func (this *Operator) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 19) + s := make([]string, 0, 20) s = append(s, "&planpb.Operator{") s = append(s, "OpType: "+fmt.Sprintf("%#v", this.OpType)+",\n") if this.Op != nil { s = append(s, "Op: "+fmt.Sprintf("%#v", this.Op)+",\n") } - keysForContext := make([]string, 0, len(this.Context)) - for k, _ := range this.Context { - keysForContext = append(keysForContext, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForContext) - mapStringForContext := "map[string]string{" - for _, k := range keysForContext { - mapStringForContext += fmt.Sprintf("%#v: %#v,", k, this.Context[k]) - } - mapStringForContext += "}" - if this.Context != nil { - s = append(s, "Context: "+mapStringForContext+",\n") - } s = append(s, "}") return strings.Join(s, "") } @@ -6151,6 +6754,22 @@ func (this *Operator_OTelSinkOp) GoString() string { `OTelSinkOp:` + fmt.Sprintf("%#v", this.OTelSinkOp) + `}`}, ", ") return s } +func (this *Operator_ClickhouseSourceOp) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&planpb.Operator_ClickhouseSourceOp{` + + `ClickhouseSourceOp:` + fmt.Sprintf("%#v", this.ClickhouseSourceOp) + `}`}, ", ") + return s +} +func (this *Operator_ClickhouseSinkOp) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&planpb.Operator_ClickhouseSinkOp{` + + `ClickhouseSinkOp:` + 
fmt.Sprintf("%#v", this.ClickhouseSinkOp) + `}`}, ", ") + return s +} func (this *MemorySourceOperator) GoString() string { if this == nil { return "nil" @@ -6401,6 +7020,29 @@ func (this *EmptySourceOperator) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ClickHouseSourceOperator) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 18) + s = append(s, "&planpb.ClickHouseSourceOperator{") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Password: "+fmt.Sprintf("%#v", this.Password)+",\n") + s = append(s, "Database: "+fmt.Sprintf("%#v", this.Database)+",\n") + s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") + s = append(s, "ColumnNames: "+fmt.Sprintf("%#v", this.ColumnNames)+",\n") + s = append(s, "ColumnTypes: "+fmt.Sprintf("%#v", this.ColumnTypes)+",\n") + s = append(s, "BatchSize: "+fmt.Sprintf("%#v", this.BatchSize)+",\n") + s = append(s, "Streaming: "+fmt.Sprintf("%#v", this.Streaming)+",\n") + s = append(s, "TimestampColumn: "+fmt.Sprintf("%#v", this.TimestampColumn)+",\n") + s = append(s, "PartitionColumn: "+fmt.Sprintf("%#v", this.PartitionColumn)+",\n") + s = append(s, "StartTime: "+fmt.Sprintf("%#v", this.StartTime)+",\n") + s = append(s, "EndTime: "+fmt.Sprintf("%#v", this.EndTime)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *OTelLog) GoString() string { if this == nil { return "nil" @@ -6609,6 +7251,21 @@ func (this *OTelEndpointConfig) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ClickHouseConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&planpb.ClickHouseConfig{") + s = append(s, "Hostname: "+fmt.Sprintf("%#v", this.Hostname)+",\n") + s = append(s, "Host: "+fmt.Sprintf("%#v", 
this.Host)+",\n") + s = append(s, "Port: "+fmt.Sprintf("%#v", this.Port)+",\n") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Password: "+fmt.Sprintf("%#v", this.Password)+",\n") + s = append(s, "Database: "+fmt.Sprintf("%#v", this.Database)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *OTelResource) GoString() string { if this == nil { return "nil" @@ -6645,6 +7302,34 @@ func (this *OTelExportSinkOperator) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ClickHouseExportSinkOperator) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&planpb.ClickHouseExportSinkOperator{") + if this.ClickhouseConfig != nil { + s = append(s, "ClickhouseConfig: "+fmt.Sprintf("%#v", this.ClickhouseConfig)+",\n") + } + s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") + if this.ColumnMappings != nil { + s = append(s, "ColumnMappings: "+fmt.Sprintf("%#v", this.ColumnMappings)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ClickHouseExportSinkOperator_ColumnMapping) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&planpb.ClickHouseExportSinkOperator_ColumnMapping{") + s = append(s, "InputColumnIndex: "+fmt.Sprintf("%#v", this.InputColumnIndex)+",\n") + s = append(s, "ClickhouseColumnName: "+fmt.Sprintf("%#v", this.ClickhouseColumnName)+",\n") + s = append(s, "ColumnType: "+fmt.Sprintf("%#v", this.ColumnType)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *ScalarExpression) GoString() string { if this == nil { return "nil" @@ -7223,25 +7908,6 @@ func (m *Operator) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } } - if len(m.Context) > 0 { - for k := range m.Context { - v := m.Context[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintPlan(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i 
-= len(k) - copy(dAtA[i:], k) - i = encodeVarintPlan(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintPlan(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x7a - } - } if m.OpType != 0 { i = encodeVarintPlan(dAtA, i, uint64(m.OpType)) i-- @@ -7502,12 +8168,56 @@ func (m *Operator_OTelSinkOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Operator_GRPCSinkOp) MarshalTo(dAtA []byte) (int, error) { +func (m *Operator_ClickhouseSourceOp) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Operator_GRPCSinkOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Operator_ClickhouseSourceOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClickhouseSourceOp != nil { + { + size, err := m.ClickhouseSourceOp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlan(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Operator_ClickhouseSinkOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Operator_ClickhouseSinkOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ClickhouseSinkOp != nil { + { + size, err := m.ClickhouseSinkOp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlan(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Operator_GRPCSinkOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Operator_GRPCSinkOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.GRPCSinkOp != nil { { @@ -7587,20 +8297,20 @@ func (m *MemorySourceOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x2a } if len(m.ColumnTypes) > 0 { - dAtA25 := 
make([]byte, len(m.ColumnTypes)*10) - var j24 int + dAtA27 := make([]byte, len(m.ColumnTypes)*10) + var j26 int for _, num := range m.ColumnTypes { for num >= 1<<7 { - dAtA25[j24] = uint8(uint64(num)&0x7f | 0x80) + dAtA27[j26] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j24++ + j26++ } - dAtA25[j24] = uint8(num) - j24++ + dAtA27[j26] = uint8(num) + j26++ } - i -= j24 - copy(dAtA[i:], dAtA25[:j24]) - i = encodeVarintPlan(dAtA, i, uint64(j24)) + i -= j26 + copy(dAtA[i:], dAtA27[:j26]) + i = encodeVarintPlan(dAtA, i, uint64(j26)) i-- dAtA[i] = 0x22 } @@ -7614,21 +8324,21 @@ func (m *MemorySourceOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.ColumnIdxs) > 0 { - dAtA27 := make([]byte, len(m.ColumnIdxs)*10) - var j26 int + dAtA29 := make([]byte, len(m.ColumnIdxs)*10) + var j28 int for _, num1 := range m.ColumnIdxs { num := uint64(num1) for num >= 1<<7 { - dAtA27[j26] = uint8(uint64(num)&0x7f | 0x80) + dAtA29[j28] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j26++ + j28++ } - dAtA27[j26] = uint8(num) - j26++ + dAtA29[j28] = uint8(num) + j28++ } - i -= j26 - copy(dAtA[i:], dAtA27[:j26]) - i = encodeVarintPlan(dAtA, i, uint64(j26)) + i -= j28 + copy(dAtA[i:], dAtA29[:j28]) + i = encodeVarintPlan(dAtA, i, uint64(j28)) i-- dAtA[i] = 0x12 } @@ -7663,20 +8373,20 @@ func (m *MemorySinkOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.ColumnSemanticTypes) > 0 { - dAtA29 := make([]byte, len(m.ColumnSemanticTypes)*10) - var j28 int + dAtA31 := make([]byte, len(m.ColumnSemanticTypes)*10) + var j30 int for _, num := range m.ColumnSemanticTypes { for num >= 1<<7 { - dAtA29[j28] = uint8(uint64(num)&0x7f | 0x80) + dAtA31[j30] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j28++ + j30++ } - dAtA29[j28] = uint8(num) - j28++ + dAtA31[j30] = uint8(num) + j30++ } - i -= j28 - copy(dAtA[i:], dAtA29[:j28]) - i = encodeVarintPlan(dAtA, i, uint64(j28)) + i -= j30 + copy(dAtA[i:], dAtA31[:j30]) + i = encodeVarintPlan(dAtA, i, uint64(j30)) 
i-- dAtA[i] = 0x22 } @@ -7690,20 +8400,20 @@ func (m *MemorySinkOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.ColumnTypes) > 0 { - dAtA31 := make([]byte, len(m.ColumnTypes)*10) - var j30 int + dAtA33 := make([]byte, len(m.ColumnTypes)*10) + var j32 int for _, num := range m.ColumnTypes { for num >= 1<<7 { - dAtA31[j30] = uint8(uint64(num)&0x7f | 0x80) + dAtA33[j32] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j30++ + j32++ } - dAtA31[j30] = uint8(num) - j30++ + dAtA33[j32] = uint8(num) + j32++ } - i -= j30 - copy(dAtA[i:], dAtA31[:j30]) - i = encodeVarintPlan(dAtA, i, uint64(j30)) + i -= j32 + copy(dAtA[i:], dAtA33[:j32]) + i = encodeVarintPlan(dAtA, i, uint64(j32)) i-- dAtA[i] = 0x12 } @@ -7747,20 +8457,20 @@ func (m *GRPCSourceOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { } } if len(m.ColumnTypes) > 0 { - dAtA33 := make([]byte, len(m.ColumnTypes)*10) - var j32 int + dAtA35 := make([]byte, len(m.ColumnTypes)*10) + var j34 int for _, num := range m.ColumnTypes { for num >= 1<<7 { - dAtA33[j32] = uint8(uint64(num)&0x7f | 0x80) + dAtA35[j34] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j32++ + j34++ } - dAtA33[j32] = uint8(num) - j32++ + dAtA35[j34] = uint8(num) + j34++ } - i -= j32 - copy(dAtA[i:], dAtA33[:j32]) - i = encodeVarintPlan(dAtA, i, uint64(j32)) + i -= j34 + copy(dAtA[i:], dAtA35[:j34]) + i = encodeVarintPlan(dAtA, i, uint64(j34)) i-- dAtA[i] = 0xa } @@ -7872,20 +8582,20 @@ func (m *GRPCSinkOperator_ResultTable) MarshalToSizedBuffer(dAtA []byte) (int, e var l int _ = l if len(m.ColumnSemanticTypes) > 0 { - dAtA37 := make([]byte, len(m.ColumnSemanticTypes)*10) - var j36 int + dAtA39 := make([]byte, len(m.ColumnSemanticTypes)*10) + var j38 int for _, num := range m.ColumnSemanticTypes { for num >= 1<<7 { - dAtA37[j36] = uint8(uint64(num)&0x7f | 0x80) + dAtA39[j38] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j36++ + j38++ } - dAtA37[j36] = uint8(num) - j36++ + dAtA39[j38] = uint8(num) + j38++ } - i -= j36 - 
copy(dAtA[i:], dAtA37[:j36]) - i = encodeVarintPlan(dAtA, i, uint64(j36)) + i -= j38 + copy(dAtA[i:], dAtA39[:j38]) + i = encodeVarintPlan(dAtA, i, uint64(j38)) i-- dAtA[i] = 0x22 } @@ -7899,20 +8609,20 @@ func (m *GRPCSinkOperator_ResultTable) MarshalToSizedBuffer(dAtA []byte) (int, e } } if len(m.ColumnTypes) > 0 { - dAtA39 := make([]byte, len(m.ColumnTypes)*10) - var j38 int + dAtA41 := make([]byte, len(m.ColumnTypes)*10) + var j40 int for _, num := range m.ColumnTypes { for num >= 1<<7 { - dAtA39[j38] = uint8(uint64(num)&0x7f | 0x80) + dAtA41[j40] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j38++ + j40++ } - dAtA39[j38] = uint8(num) - j38++ + dAtA41[j40] = uint8(num) + j40++ } - i -= j38 - copy(dAtA[i:], dAtA39[:j38]) - i = encodeVarintPlan(dAtA, i, uint64(j38)) + i -= j40 + copy(dAtA[i:], dAtA41[:j40]) + i = encodeVarintPlan(dAtA, i, uint64(j40)) i-- dAtA[i] = 0x12 } @@ -8171,20 +8881,20 @@ func (m *LimitOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.AbortableSrcs) > 0 { - dAtA42 := make([]byte, len(m.AbortableSrcs)*10) - var j41 int + dAtA44 := make([]byte, len(m.AbortableSrcs)*10) + var j43 int for _, num := range m.AbortableSrcs { for num >= 1<<7 { - dAtA42[j41] = uint8(uint64(num)&0x7f | 0x80) + dAtA44[j43] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j41++ + j43++ } - dAtA42[j41] = uint8(num) - j41++ + dAtA44[j43] = uint8(num) + j43++ } - i -= j41 - copy(dAtA[i:], dAtA42[:j41]) - i = encodeVarintPlan(dAtA, i, uint64(j41)) + i -= j43 + copy(dAtA[i:], dAtA44[:j43]) + i = encodeVarintPlan(dAtA, i, uint64(j43)) i-- dAtA[i] = 0x1a } @@ -8282,21 +8992,21 @@ func (m *UnionOperator_ColumnMapping) MarshalToSizedBuffer(dAtA []byte) (int, er var l int _ = l if len(m.ColumnIndexes) > 0 { - dAtA44 := make([]byte, len(m.ColumnIndexes)*10) - var j43 int + dAtA46 := make([]byte, len(m.ColumnIndexes)*10) + var j45 int for _, num1 := range m.ColumnIndexes { num := uint64(num1) for num >= 1<<7 { - dAtA44[j43] = uint8(uint64(num)&0x7f | 
0x80) + dAtA46[j45] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j43++ + j45++ } - dAtA44[j43] = uint8(num) - j43++ + dAtA46[j45] = uint8(num) + j45++ } - i -= j43 - copy(dAtA[i:], dAtA44[:j43]) - i = encodeVarintPlan(dAtA, i, uint64(j43)) + i -= j45 + copy(dAtA[i:], dAtA46[:j45]) + i = encodeVarintPlan(dAtA, i, uint64(j45)) i-- dAtA[i] = 0xa } @@ -8504,20 +9214,20 @@ func (m *EmptySourceOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.ColumnTypes) > 0 { - dAtA46 := make([]byte, len(m.ColumnTypes)*10) - var j45 int + dAtA48 := make([]byte, len(m.ColumnTypes)*10) + var j47 int for _, num := range m.ColumnTypes { for num >= 1<<7 { - dAtA46[j45] = uint8(uint64(num)&0x7f | 0x80) + dAtA48[j47] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j45++ + j47++ } - dAtA46[j45] = uint8(num) - j45++ + dAtA48[j47] = uint8(num) + j47++ } - i -= j45 - copy(dAtA[i:], dAtA46[:j45]) - i = encodeVarintPlan(dAtA, i, uint64(j45)) + i -= j47 + copy(dAtA[i:], dAtA48[:j47]) + i = encodeVarintPlan(dAtA, i, uint64(j47)) i-- dAtA[i] = 0x12 } @@ -8533,6 +9243,135 @@ func (m *EmptySourceOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ClickHouseSourceOperator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClickHouseSourceOperator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClickHouseSourceOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EndTime != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.EndTime)) + i-- + dAtA[i] = 0x70 + } + if m.StartTime != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x68 + } + if len(m.PartitionColumn) > 0 { + i -= len(m.PartitionColumn) + copy(dAtA[i:], 
m.PartitionColumn) + i = encodeVarintPlan(dAtA, i, uint64(len(m.PartitionColumn))) + i-- + dAtA[i] = 0x62 + } + if len(m.TimestampColumn) > 0 { + i -= len(m.TimestampColumn) + copy(dAtA[i:], m.TimestampColumn) + i = encodeVarintPlan(dAtA, i, uint64(len(m.TimestampColumn))) + i-- + dAtA[i] = 0x5a + } + if m.Streaming { + i-- + if m.Streaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.BatchSize != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.BatchSize)) + i-- + dAtA[i] = 0x48 + } + if len(m.ColumnTypes) > 0 { + dAtA50 := make([]byte, len(m.ColumnTypes)*10) + var j49 int + for _, num := range m.ColumnTypes { + for num >= 1<<7 { + dAtA50[j49] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j49++ + } + dAtA50[j49] = uint8(num) + j49++ + } + i -= j49 + copy(dAtA[i:], dAtA50[:j49]) + i = encodeVarintPlan(dAtA, i, uint64(j49)) + i-- + dAtA[i] = 0x42 + } + if len(m.ColumnNames) > 0 { + for iNdEx := len(m.ColumnNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ColumnNames[iNdEx]) + copy(dAtA[i:], m.ColumnNames[iNdEx]) + i = encodeVarintPlan(dAtA, i, uint64(len(m.ColumnNames[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x32 + } + if len(m.Database) > 0 { + i -= len(m.Database) + copy(dAtA[i:], m.Database) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Database))) + i-- + dAtA[i] = 0x2a + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x22 + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x1a + } + if m.Port != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintPlan(dAtA, i, 
uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *OTelLog) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9135,7 +9974,7 @@ func (m *OTelEndpointConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *OTelResource) Marshal() (dAtA []byte, err error) { +func (m *ClickHouseConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -9145,41 +9984,104 @@ func (m *OTelResource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *OTelResource) MarshalTo(dAtA []byte) (int, error) { +func (m *ClickHouseConfig) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *OTelResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ClickHouseConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlan(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if len(m.Database) > 0 { + i -= len(m.Database) + copy(dAtA[i:], m.Database) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Database))) + i-- + dAtA[i] = 0x32 } - return len(dAtA) - i, nil -} - -func (m *OTelExportSinkOperator) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x2a } - return dAtA[:n], nil + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintPlan(dAtA, i, 
uint64(len(m.Username))) + i-- + dAtA[i] = 0x22 + } + if m.Port != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintPlan(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OTelResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OTelResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OTelResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlan(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OTelExportSinkOperator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } func (m *OTelExportSinkOperator) MarshalTo(dAtA []byte) (int, error) { @@ -9261,6 +10163,102 @@ func (m *OTelExportSinkOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *ClickHouseExportSinkOperator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClickHouseExportSinkOperator) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClickHouseExportSinkOperator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ColumnMappings) > 0 { + for iNdEx := len(m.ColumnMappings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ColumnMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlan(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarintPlan(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0x12 + } + if m.ClickhouseConfig != nil { + { + size, err := m.ClickhouseConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPlan(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClickHouseExportSinkOperator_ColumnMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClickHouseExportSinkOperator_ColumnMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClickHouseExportSinkOperator_ColumnMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ColumnType != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.ColumnType)) + i-- + dAtA[i] = 0x18 + } + if len(m.ClickhouseColumnName) > 0 { + i -= len(m.ClickhouseColumnName) + copy(dAtA[i:], m.ClickhouseColumnName) + i = encodeVarintPlan(dAtA, i, uint64(len(m.ClickhouseColumnName))) + i-- + dAtA[i] = 0x12 + } + if m.InputColumnIndex != 0 { + i = encodeVarintPlan(dAtA, i, uint64(m.InputColumnIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} 
+ func (m *ScalarExpression) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9503,20 +10501,20 @@ func (m *ScalarFunc) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.ArgsDataTypes) > 0 { - dAtA57 := make([]byte, len(m.ArgsDataTypes)*10) - var j56 int + dAtA62 := make([]byte, len(m.ArgsDataTypes)*10) + var j61 int for _, num := range m.ArgsDataTypes { for num >= 1<<7 { - dAtA57[j56] = uint8(uint64(num)&0x7f | 0x80) + dAtA62[j61] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j56++ + j61++ } - dAtA57[j56] = uint8(num) - j56++ + dAtA62[j61] = uint8(num) + j61++ } - i -= j56 - copy(dAtA[i:], dAtA57[:j56]) - i = encodeVarintPlan(dAtA, i, uint64(j56)) + i -= j61 + copy(dAtA[i:], dAtA62[:j61]) + i = encodeVarintPlan(dAtA, i, uint64(j61)) i-- dAtA[i] = 0x2a } @@ -9584,20 +10582,20 @@ func (m *AggregateExpression) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.ArgsDataTypes) > 0 { - dAtA59 := make([]byte, len(m.ArgsDataTypes)*10) - var j58 int + dAtA64 := make([]byte, len(m.ArgsDataTypes)*10) + var j63 int for _, num := range m.ArgsDataTypes { for num >= 1<<7 { - dAtA59[j58] = uint8(uint64(num)&0x7f | 0x80) + dAtA64[j63] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j58++ + j63++ } - dAtA59[j58] = uint8(num) - j58++ + dAtA64[j63] = uint8(num) + j63++ } - i -= j58 - copy(dAtA[i:], dAtA59[:j58]) - i = encodeVarintPlan(dAtA, i, uint64(j58)) + i -= j63 + copy(dAtA[i:], dAtA64[:j63]) + i = encodeVarintPlan(dAtA, i, uint64(j63)) i-- dAtA[i] = 0x3a } @@ -9923,14 +10921,6 @@ func (m *Operator) Size() (n int) { if m.Op != nil { n += m.Op.Size() } - if len(m.Context) > 0 { - for k, v := range m.Context { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovPlan(uint64(len(k))) + 1 + len(v) + sovPlan(uint64(len(v))) - n += mapEntrySize + 1 + sovPlan(uint64(mapEntrySize)) - } - } return n } @@ -10078,6 +11068,30 @@ func (m *Operator_OTelSinkOp) Size() (n int) { } return n } +func (m 
*Operator_ClickhouseSourceOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClickhouseSourceOp != nil { + l = m.ClickhouseSourceOp.Size() + n += 1 + l + sovPlan(uint64(l)) + } + return n +} +func (m *Operator_ClickhouseSinkOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClickhouseSinkOp != nil { + l = m.ClickhouseSinkOp.Size() + n += 2 + l + sovPlan(uint64(l)) + } + return n +} func (m *Operator_GRPCSinkOp) Size() (n int) { if m == nil { return 0 @@ -10531,6 +11545,71 @@ func (m *EmptySourceOperator) Size() (n int) { return n } +func (m *ClickHouseSourceOperator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovPlan(uint64(m.Port)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.Database) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + if len(m.ColumnNames) > 0 { + for _, s := range m.ColumnNames { + l = len(s) + n += 1 + l + sovPlan(uint64(l)) + } + } + if len(m.ColumnTypes) > 0 { + l = 0 + for _, e := range m.ColumnTypes { + l += sovPlan(uint64(e)) + } + n += 1 + sovPlan(uint64(l)) + l + } + if m.BatchSize != 0 { + n += 1 + sovPlan(uint64(m.BatchSize)) + } + if m.Streaming { + n += 2 + } + l = len(m.TimestampColumn) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.PartitionColumn) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + if m.StartTime != 0 { + n += 1 + sovPlan(uint64(m.StartTime)) + } + if m.EndTime != 0 { + n += 1 + sovPlan(uint64(m.EndTime)) + } + return n +} + func (m *OTelLog) Size() (n int) { if m == nil { return 0 @@ -10823,6 +11902,38 @@ func (m *OTelEndpointConfig) Size() (n int) { return n } +func (m *ClickHouseConfig) Size() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.Host) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovPlan(uint64(m.Port)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.Database) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + return n +} + func (m *OTelResource) Size() (n int) { if m == nil { return 0 @@ -10873,26 +11984,68 @@ func (m *OTelExportSinkOperator) Size() (n int) { return n } -func (m *ScalarExpression) Size() (n int) { +func (m *ClickHouseExportSinkOperator) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Value != nil { - n += m.Value.Size() + if m.ClickhouseConfig != nil { + l = m.ClickhouseConfig.Size() + n += 1 + l + sovPlan(uint64(l)) + } + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + if len(m.ColumnMappings) > 0 { + for _, e := range m.ColumnMappings { + l = e.Size() + n += 1 + l + sovPlan(uint64(l)) + } } return n } -func (m *ScalarExpression_Constant) Size() (n int) { +func (m *ClickHouseExportSinkOperator_ColumnMapping) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Constant != nil { - l = m.Constant.Size() + if m.InputColumnIndex != 0 { + n += 1 + sovPlan(uint64(m.InputColumnIndex)) + } + l = len(m.ClickhouseColumnName) + if l > 0 { + n += 1 + l + sovPlan(uint64(l)) + } + if m.ColumnType != 0 { + n += 1 + sovPlan(uint64(m.ColumnType)) + } + return n +} + +func (m *ScalarExpression) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *ScalarExpression_Constant) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Constant != nil { + l = m.Constant.Size() n += 1 + l + sovPlan(uint64(l)) } return n @@ -11232,20 +12385,9 @@ func (this *Operator) String() string { 
if this == nil { return "nil" } - keysForContext := make([]string, 0, len(this.Context)) - for k, _ := range this.Context { - keysForContext = append(keysForContext, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForContext) - mapStringForContext := "map[string]string{" - for _, k := range keysForContext { - mapStringForContext += fmt.Sprintf("%v: %v,", k, this.Context[k]) - } - mapStringForContext += "}" s := strings.Join([]string{`&Operator{`, `OpType:` + fmt.Sprintf("%v", this.OpType) + `,`, `Op:` + fmt.Sprintf("%v", this.Op) + `,`, - `Context:` + mapStringForContext + `,`, `}`, }, "") return s @@ -11370,6 +12512,26 @@ func (this *Operator_OTelSinkOp) String() string { }, "") return s } +func (this *Operator_ClickhouseSourceOp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Operator_ClickhouseSourceOp{`, + `ClickhouseSourceOp:` + strings.Replace(fmt.Sprintf("%v", this.ClickhouseSourceOp), "ClickHouseSourceOperator", "ClickHouseSourceOperator", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Operator_ClickhouseSinkOp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Operator_ClickhouseSinkOp{`, + `ClickhouseSinkOp:` + strings.Replace(fmt.Sprintf("%v", this.ClickhouseSinkOp), "ClickHouseExportSinkOperator", "ClickHouseExportSinkOperator", 1) + `,`, + `}`, + }, "") + return s +} func (this *Operator_GRPCSinkOp) String() string { if this == nil { return "nil" @@ -11651,6 +12813,29 @@ func (this *EmptySourceOperator) String() string { }, "") return s } +func (this *ClickHouseSourceOperator) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClickHouseSourceOperator{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `Database:` + fmt.Sprintf("%v", this.Database) + `,`, + `Query:` + 
fmt.Sprintf("%v", this.Query) + `,`, + `ColumnNames:` + fmt.Sprintf("%v", this.ColumnNames) + `,`, + `ColumnTypes:` + fmt.Sprintf("%v", this.ColumnTypes) + `,`, + `BatchSize:` + fmt.Sprintf("%v", this.BatchSize) + `,`, + `Streaming:` + fmt.Sprintf("%v", this.Streaming) + `,`, + `TimestampColumn:` + fmt.Sprintf("%v", this.TimestampColumn) + `,`, + `PartitionColumn:` + fmt.Sprintf("%v", this.PartitionColumn) + `,`, + `StartTime:` + fmt.Sprintf("%v", this.StartTime) + `,`, + `EndTime:` + fmt.Sprintf("%v", this.EndTime) + `,`, + `}`, + }, "") + return s +} func (this *OTelLog) String() string { if this == nil { return "nil" @@ -11877,6 +13062,21 @@ func (this *OTelEndpointConfig) String() string { }, "") return s } +func (this *ClickHouseConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClickHouseConfig{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `Database:` + fmt.Sprintf("%v", this.Database) + `,`, + `}`, + }, "") + return s +} func (this *OTelResource) String() string { if this == nil { return "nil" @@ -11921,6 +13121,35 @@ func (this *OTelExportSinkOperator) String() string { }, "") return s } +func (this *ClickHouseExportSinkOperator) String() string { + if this == nil { + return "nil" + } + repeatedStringForColumnMappings := "[]*ClickHouseExportSinkOperator_ColumnMapping{" + for _, f := range this.ColumnMappings { + repeatedStringForColumnMappings += strings.Replace(fmt.Sprintf("%v", f), "ClickHouseExportSinkOperator_ColumnMapping", "ClickHouseExportSinkOperator_ColumnMapping", 1) + "," + } + repeatedStringForColumnMappings += "}" + s := strings.Join([]string{`&ClickHouseExportSinkOperator{`, + `ClickhouseConfig:` + strings.Replace(this.ClickhouseConfig.String(), "ClickHouseConfig", 
"ClickHouseConfig", 1) + `,`, + `TableName:` + fmt.Sprintf("%v", this.TableName) + `,`, + `ColumnMappings:` + repeatedStringForColumnMappings + `,`, + `}`, + }, "") + return s +} +func (this *ClickHouseExportSinkOperator_ColumnMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClickHouseExportSinkOperator_ColumnMapping{`, + `InputColumnIndex:` + fmt.Sprintf("%v", this.InputColumnIndex) + `,`, + `ClickhouseColumnName:` + fmt.Sprintf("%v", this.ClickhouseColumnName) + `,`, + `ColumnType:` + fmt.Sprintf("%v", this.ColumnType) + `,`, + `}`, + }, "") + return s +} func (this *ScalarExpression) String() string { if this == nil { return "nil" @@ -13595,7 +14824,7 @@ func (m *Operator) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClickhouseSourceOp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13622,103 +14851,46 @@ func (m *Operator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Context == nil { - m.Context = make(map[string]string) + v := &ClickHouseSourceOperator{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + m.Op = &Operator_ClickhouseSourceOp{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClickhouseSinkOp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan } - fieldNum := int32(wire >> 3) - if 
fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthPlan - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthPlan - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthPlan - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthPlan - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlan - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF } - 
m.Context[mapkey] = mapvalue + v := &ClickHouseExportSinkOperator{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Operator_ClickhouseSinkOp{v} iNdEx = postIndex case 1000: if wireType != 2 { @@ -16618,7 +17790,7 @@ func (m *EmptySourceOperator) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelLog) Unmarshal(dAtA []byte) error { +func (m *ClickHouseSourceOperator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16641,17 +17813,17 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OTelLog: wiretype end group for non-group") + return fmt.Errorf("proto: ClickHouseSourceOperator: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OTelLog: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClickHouseSourceOperator: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16661,31 +17833,29 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - m.Attributes = append(m.Attributes, &OTelAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeColumnIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - m.TimeColumnIndex = 0 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16695,16 +17865,16 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeColumnIndex |= int64(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeColumnIndex", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) } - m.ObservedTimeColumnIndex = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16714,16 +17884,29 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedTimeColumnIndex |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) } - m.SeverityNumber = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16733,16 +17916,29 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SeverityNumber |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - 
case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan } - var stringLen uint64 + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16768,13 +17964,13 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SeverityText = string(dAtA[iNdEx:postIndex]) + m.Database = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BodyColumnIndex", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - m.BodyColumnIndex = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16784,64 +17980,27 @@ func (m *OTelLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BodyColumnIndex |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OTelSpan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowPlan + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OTelSpan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OTelSpan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NameString", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnNames", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16869,67 +18028,82 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = &OTelSpan_NameString{string(dAtA[iNdEx:postIndex])} + m.ColumnNames = append(m.ColumnNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NameColumnIndex", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan + case 8: + if wireType == 0 { + var v typespb.DataType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= typespb.DataType(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.ColumnTypes = append(m.ColumnTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break + if packedLen < 0 { + return ErrInvalidLengthPlan } - } - m.Name = &OTelSpan_NameColumnIndex{v} - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthPlan } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + if elementCount != 0 && len(m.ColumnTypes) == 0 { + m.ColumnTypes = make([]typespb.DataType, 0, elementCount) } + for iNdEx < postIndex { + var v typespb.DataType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= typespb.DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ColumnTypes = append(m.ColumnTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnTypes", wireType) } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, &OTelAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceIDColumn", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BatchSize", wireType) } - m.TraceIDColumn = 0 + m.BatchSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowPlan @@ -16939,16 +18113,16 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TraceIDColumn |= int64(b&0x7F) << shift + m.BatchSize |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 5: + case 10: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanIDColumn", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streaming", wireType) } - m.SpanIDColumn = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16958,16 +18132,17 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SpanIDColumn |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanIDColumn", wireType) + m.Streaming = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampColumn", wireType) } - m.ParentSpanIDColumn = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -16977,16 +18152,29 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ParentSpanIDColumn |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeColumnIndex", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan } - m.StartTimeColumnIndex = 0 + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TimestampColumn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartitionColumn", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { 
if shift >= 64 { return ErrIntOverflowPlan @@ -16996,16 +18184,29 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartTimeColumnIndex |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 8: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PartitionColumn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimeColumnIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) } - m.EndTimeColumnIndex = 0 + m.StartTime = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17015,16 +18216,16 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.EndTimeColumnIndex |= int64(b&0x7F) << shift + m.StartTime |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 9: + case 14: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KindValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) } - m.KindValue = 0 + m.EndTime = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17034,7 +18235,7 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.KindValue |= int64(b&0x7F) << shift + m.EndTime |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -17060,7 +18261,7 @@ func (m *OTelSpan) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelMetricGauge) Unmarshal(dAtA []byte) error { +func (m *OTelLog) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17083,17 +18284,17 @@ func (m *OTelMetricGauge) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := 
int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OTelMetricGauge: wiretype end group for non-group") + return fmt.Errorf("proto: OTelLog: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OTelMetricGauge: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OTelLog: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FloatColumnIndex", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } - var v int64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17103,17 +18304,31 @@ func (m *OTelMetricGauge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.ValueColumn = &OTelMetricGauge_FloatColumnIndex{v} + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &OTelAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntColumnIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeColumnIndex", wireType) } - var v int64 + m.TimeColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17123,67 +18338,16 @@ func (m *OTelMetricGauge) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + m.TimeColumnIndex |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.ValueColumn = &OTelMetricGauge_IntColumnIndex{v} - default: - iNdEx = preIndex - skippy, err := 
skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OTelMetricSummary) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OTelMetricSummary: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OTelMetricSummary: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountColumnIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObservedTimeColumnIndex", wireType) } - m.CountColumnIndex = 0 + m.ObservedTimeColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17193,16 +18357,16 @@ func (m *OTelMetricSummary) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CountColumnIndex |= int64(b&0x7F) << shift + m.ObservedTimeColumnIndex |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SumColumnIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SeverityNumber", wireType) } - m.SumColumnIndex = 0 + m.SeverityNumber = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17212,16 +18376,16 @@ func (m *OTelMetricSummary) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - m.SumColumnIndex |= int64(b&0x7F) << shift + m.SeverityNumber |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SeverityText", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17231,92 +18395,29 @@ func (m *OTelMetricSummary) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - m.QuantileValues = append(m.QuantileValues, &OTelMetricSummary_ValueAtQuantile{}) - if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SeverityText = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlan - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OTelMetricSummary_ValueAtQuantile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := 
int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Quantile = float64(math.Float64frombits(v)) - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueColumnIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BodyColumnIndex", wireType) } - m.ValueColumnIndex = 0 + m.BodyColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17326,7 +18427,7 @@ func (m *OTelMetricSummary_ValueAtQuantile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ValueColumnIndex |= int64(b&0x7F) << shift + m.BodyColumnIndex |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -17352,7 +18453,7 @@ func (m *OTelMetricSummary_ValueAtQuantile) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelAttribute) Unmarshal(dAtA []byte) error { +func (m *OTelSpan) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17375,15 +18476,15 @@ func (m *OTelAttribute) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OTelAttribute: wiretype end group for non-group") + return fmt.Errorf("proto: OTelSpan: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OTelAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OTelSpan: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NameString", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17411,11 +18512,31 @@ func (m *OTelAttribute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Name = &OTelSpan_NameString{string(dAtA[iNdEx:postIndex])} iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NameColumnIndex", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Name = &OTelSpan_NameColumnIndex{v} + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17442,17 +18563,16 @@ func (m *OTelAttribute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &OTelAttribute_Column{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Attributes = append(m.Attributes, &OTelAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &OTelAttribute_Column_{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceIDColumn", wireType) } - var stringLen uint64 + m.TraceIDColumn = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17462,24 +18582,106 @@ func (m *OTelAttribute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= uint64(b&0x7F) << shift + m.TraceIDColumn |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanIDColumn", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan + m.SpanIDColumn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanIDColumn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanIDColumn", wireType) + } + m.ParentSpanIDColumn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParentSpanIDColumn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeColumnIndex", wireType) + } + m.StartTimeColumnIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimeColumnIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimeColumnIndex", wireType) + } + m.EndTimeColumnIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimeColumnIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
KindValue", wireType) + } + m.KindValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KindValue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Value = &OTelAttribute_StringValue{string(dAtA[iNdEx:postIndex])} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPlan(dAtA[iNdEx:]) @@ -17501,7 +18703,7 @@ func (m *OTelAttribute) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelAttribute_Column) Unmarshal(dAtA []byte) error { +func (m *OTelMetricGauge) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17524,17 +18726,17 @@ func (m *OTelAttribute_Column) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Column: wiretype end group for non-group") + return fmt.Errorf("proto: OTelMetricGauge: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Column: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OTelMetricGauge: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ColumnType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FloatColumnIndex", wireType) } - m.ColumnType = 0 + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17544,35 +18746,17 @@ func (m *OTelAttribute_Column) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ColumnType |= typespb.DataType(b&0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } + m.ValueColumn = &OTelMetricGauge_FloatColumnIndex{v} case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ColumnIndex", wireType) - } - m.ColumnIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ColumnIndex |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CanBeJsonEncodedArray", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IntColumnIndex", wireType) } - var v int + var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17582,12 +18766,12 @@ func (m *OTelAttribute_Column) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.CanBeJsonEncodedArray = bool(v != 0) + m.ValueColumn = &OTelMetricGauge_IntColumnIndex{v} default: iNdEx = preIndex skippy, err := skipPlan(dAtA[iNdEx:]) @@ -17609,7 +18793,7 @@ func (m *OTelAttribute_Column) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelMetric) Unmarshal(dAtA []byte) error { +func (m *OTelMetricSummary) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17632,17 +18816,17 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OTelMetric: wiretype end group for non-group") + return fmt.Errorf("proto: OTelMetricSummary: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OTelMetric: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OTelMetricSummary: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountColumnIndex", wireType) } - var stringLen uint64 + m.CountColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ 
-17652,29 +18836,16 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.CountColumnIndex |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SumColumnIndex", wireType) } - var stringLen uint64 + m.SumColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17684,29 +18855,16 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SumColumnIndex |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17716,29 +18874,92 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthPlan } - postIndex 
:= iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - m.Unit = string(dAtA[iNdEx:postIndex]) + m.QuantileValues = append(m.QuantileValues, &OTelMetricSummary_ValueAtQuantile{}) + if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OTelMetricSummary_ValueAtQuantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueColumnIndex", 
wireType) + } + m.ValueColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17748,31 +18969,66 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ValueColumnIndex |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthPlan } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Attributes = append(m.Attributes, &OTelAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OTelAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeColumnIndex", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.TimeColumnIndex = 0 + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OTelAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OTelAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ 
-17782,14 +19038,27 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeColumnIndex |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 101: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17816,17 +19085,17 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &OTelMetricGauge{} + v := &OTelAttribute_Column{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Data = &OTelMetric_Gauge{v} + m.Value = &OTelAttribute_Column_{v} iNdEx = postIndex - case 102: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17836,27 +19105,132 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - v := &OTelMetricSummary{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Value = 
&OTelAttribute_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { return err } - m.Data = &OTelMetric_Summary{v} - iNdEx = postIndex + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OTelAttribute_Column) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Column: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Column: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnType", wireType) + } + m.ColumnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ColumnType |= typespb.DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnIndex", wireType) + } + m.ColumnIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ColumnIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
CanBeJsonEncodedArray", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CanBeJsonEncodedArray = bool(v != 0) default: iNdEx = preIndex skippy, err := skipPlan(dAtA[iNdEx:]) @@ -17878,7 +19252,7 @@ func (m *OTelMetric) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { +func (m *OTelMetric) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17901,15 +19275,15 @@ func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OTelEndpointConfig: wiretype end group for non-group") + return fmt.Errorf("proto: OTelMetric: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OTelEndpointConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OTelMetric: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17937,13 +19311,13 @@ func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -17953,44 +19327,313 @@ func (m 
*OTelEndpointConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Headers == nil { - m.Headers = make(map[string]string) + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &OTelAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeColumnIndex", wireType) + } + m.TimeColumnIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeColumnIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 101: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &OTelMetricGauge{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &OTelMetric_Gauge{v} + iNdEx = postIndex + case 102: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + v := &OTelMetricSummary{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &OTelMetric_Summary{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OTelEndpointConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OTelEndpointConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { @@ -18064,13 +19707,487 @@ func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Headers[mapkey] = mapvalue + m.Headers[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Insecure = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClickHouseConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClickHouseConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClickHouseConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Database = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OTelResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OTelResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OTelResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &OTelAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OTelExportSinkOperator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OTelExportSinkOperator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EndpointConfig == nil { + m.EndpointConfig = &OTelEndpointConfig{} + } + if err := m.EndpointConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &OTelResource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -18080,17 +20197,31 @@ func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Insecure = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthPlan + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &OTelMetric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", 
wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) } - m.Timeout = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -18100,64 +20231,29 @@ func (m *OTelEndpointConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Timeout |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipPlan(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthPlan } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OTelResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlan + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPlan } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Spans = append(m.Spans, &OTelSpan{}) + if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OTelResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OTelResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18184,8 +20280,8 @@ func (m 
*OTelResource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Attributes = append(m.Attributes, &OTelAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Logs = append(m.Logs, &OTelLog{}) + if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18210,7 +20306,7 @@ func (m *OTelResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { +func (m *ClickHouseExportSinkOperator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18233,15 +20329,15 @@ func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OTelExportSinkOperator: wiretype end group for non-group") + return fmt.Errorf("proto: ClickHouseExportSinkOperator: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OTelExportSinkOperator: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClickHouseExportSinkOperator: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClickhouseConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18268,18 +20364,18 @@ func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EndpointConfig == nil { - m.EndpointConfig = &OTelEndpointConfig{} + if m.ClickhouseConfig == nil { + m.ClickhouseConfig = &ClickHouseConfig{} } - if err := m.EndpointConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClickhouseConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -18289,31 +20385,27 @@ func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &OTelResource{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TableName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ColumnMappings", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18340,16 +20432,66 @@ func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Metrics = append(m.Metrics, &OTelMetric{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ColumnMappings = append(m.ColumnMappings, &ClickHouseExportSinkOperator_ColumnMapping{}) + if err := m.ColumnMappings[len(m.ColumnMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + default: + iNdEx = preIndex + skippy, err := skipPlan(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int + if (skippy < 0) || (iNdEx+skippy) < 0 { 
+ return ErrInvalidLengthPlan + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClickHouseExportSinkOperator_ColumnMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ColumnMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ColumnMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InputColumnIndex", wireType) + } + m.InputColumnIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowPlan @@ -18359,31 +20501,16 @@ func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.InputColumnIndex |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthPlan - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlan - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Spans = append(m.Spans, &OTelSpan{}) - if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClickhouseColumnName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowPlan @@ -18393,26 +20520,43 @@ func (m *OTelExportSinkOperator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthPlan } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthPlan } if postIndex > l { return io.ErrUnexpectedEOF } - m.Logs = append(m.Logs, &OTelLog{}) - if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ClickhouseColumnName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnType", wireType) + } + m.ColumnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlan + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ColumnType |= typespb.DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPlan(dAtA[iNdEx:]) diff --git a/src/carnot/planpb/plan.proto b/src/carnot/planpb/plan.proto index 25d4d2af3c9..738b4793c08 100644 --- a/src/carnot/planpb/plan.proto +++ b/src/carnot/planpb/plan.proto @@ -156,7 +156,6 @@ message Operator { // ClickHouseExportSinkOperator writes the input table to a ClickHouse database. ClickHouseExportSinkOperator clickhouse_sink_op = 16; } - map context = 15; } // Fetches data from in-memory source. 
diff --git a/src/carnot/planpb/test_proto.h b/src/carnot/planpb/test_proto.h index 42c574f8217..227d5ad7dd2 100644 --- a/src/carnot/planpb/test_proto.h +++ b/src/carnot/planpb/test_proto.h @@ -1060,10 +1060,6 @@ constexpr char kPlanWithOTelExport[] = R"proto( id: 1 op { op_type: MEMORY_SOURCE_OPERATOR - context: { - key: "mutation_id" - value: "mutation" - } mem_source_op { name: "numbers" column_idxs: 0 @@ -1082,10 +1078,6 @@ constexpr char kPlanWithOTelExport[] = R"proto( id: 2 op { op_type: OTEL_EXPORT_SINK_OPERATOR - context: { - key: "mutation_id" - value: "mutation" - } otel_sink_op { endpoint_config { url: "0.0.0.0:55690" diff --git a/src/common/json/json.h b/src/common/json/json.h index d4e38338d2d..7dab5ceef7e 100644 --- a/src/common/json/json.h +++ b/src/common/json/json.h @@ -126,27 +126,6 @@ std::string ToJSONString(const T& x) { return sb.GetString(); } -inline std::string RapidJSONTypeToString(rapidjson::Type type) { - switch (type) { - case rapidjson::kNullType: - return "Null"; - case rapidjson::kFalseType: - return "False"; - case rapidjson::kTrueType: - return "True"; - case rapidjson::kObjectType: - return "Object"; - case rapidjson::kArrayType: - return "Array"; - case rapidjson::kStringType: - return "String"; - case rapidjson::kNumberType: - return "Number"; - default: - return "Unknown"; - } -} - /* * Exposes a limited set of APIs to build JSON string, with mixed data structures; which could not * be processed by the above ToJSONString(). 
diff --git a/src/experimental/standalone_pem/BUILD.bazel b/src/experimental/standalone_pem/BUILD.bazel index 189842536ac..d7ebafcf122 100644 --- a/src/experimental/standalone_pem/BUILD.bazel +++ b/src/experimental/standalone_pem/BUILD.bazel @@ -50,7 +50,6 @@ pl_cc_library( "//src/vizier/funcs:cc_library", "//src/vizier/funcs/context:cc_library", "//src/vizier/services/agent/shared/base:cc_library", - "//src/vizier/services/metadata/local:cc_library", "@com_github_grpc_grpc//:grpc++", ], ) diff --git a/src/experimental/standalone_pem/file_source_manager.cc b/src/experimental/standalone_pem/file_source_manager.cc deleted file mode 100644 index 11727480abd..00000000000 --- a/src/experimental/standalone_pem/file_source_manager.cc +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include -#include - -#include "src/common/base/base.h" -#include "src/experimental/standalone_pem/file_source_manager.h" - -constexpr auto kUpdateInterval = std::chrono::seconds(2); - -namespace px { -namespace vizier { -namespace agent { - -FileSourceManager::FileSourceManager(px::event::Dispatcher* dispatcher, - stirling::Stirling* stirling, - table_store::TableStore* table_store) - : dispatcher_(dispatcher), stirling_(stirling), table_store_(table_store) { - file_source_monitor_timer_ = - dispatcher_->CreateTimer(std::bind(&FileSourceManager::Monitor, this)); - // Kick off the background monitor. - file_source_monitor_timer_->EnableTimer(kUpdateInterval); -} - -std::string FileSourceManager::DebugString() const { - std::lock_guard lock(mu_); - std::stringstream ss; - auto now = std::chrono::steady_clock::now(); - ss << absl::Substitute("File Source Manager Debug State:\n"); - ss << absl::Substitute("ID\tNAME\tCURRENT_STATE\tEXPECTED_STATE\tlast_updated\n"); - for (const auto& [id, file_source] : file_sources_) { - ss << absl::Substitute( - "$0\t$1\t$2\t$3\t$4 seconds\n", id.str(), file_source.name, - statuspb::LifeCycleState_Name(file_source.current_state), - statuspb::LifeCycleState_Name(file_source.expected_state), - std::chrono::duration_cast(now - file_source.last_updated_at) - .count()); - } - return ss.str(); -} - -Status FileSourceManager::HandleRegisterFileSourceRequest(sole::uuid id, std::string file_name) { - LOG(INFO) << "Registering file source: " << file_name; - - FileSourceInfo info; - info.name = file_name; - info.id = id; - info.expected_state = statuspb::RUNNING_STATE; - info.current_state = statuspb::PENDING_STATE; - info.last_updated_at = dispatcher_->GetTimeSource().MonotonicTime(); - stirling_->RegisterFileSource(id, file_name); - { - std::lock_guard lock(mu_); - file_sources_[id] = std::move(info); - file_source_name_map_[file_name] = id; - } - return Status::OK(); -} - -Status 
FileSourceManager::HandleRemoveFileSourceRequest( - sole::uuid id, const messages::FileSourceMessage& /*msg*/) { - std::lock_guard lock(mu_); - auto it = file_sources_.find(id); - if (it == file_sources_.end()) { - return error::NotFound("File source with ID: $0, not found", id.str()); - } - - it->second.expected_state = statuspb::TERMINATED_STATE; - return stirling_->RemoveFileSource(id); -} - -void FileSourceManager::Monitor() { - std::lock_guard lock(mu_); - - for (auto& [id, file_source] : file_sources_) { - auto s_or_publish = stirling_->GetFileSourceInfo(id); - statuspb::LifeCycleState current_state; - // Get the latest current state according to stirling. - if (s_or_publish.ok()) { - current_state = statuspb::RUNNING_STATE; - } else { - switch (s_or_publish.code()) { - case statuspb::FAILED_PRECONDITION: - // Means the binary has not been found. - current_state = statuspb::FAILED_STATE; - break; - case statuspb::RESOURCE_UNAVAILABLE: - current_state = statuspb::PENDING_STATE; - break; - case statuspb::NOT_FOUND: - // Means we didn't actually find the probe. If we requested termination, - // it's because the probe has been removed. - current_state = (file_source.expected_state == statuspb::TERMINATED_STATE) - ? statuspb::TERMINATED_STATE - : statuspb::UNKNOWN_STATE; - break; - default: - current_state = statuspb::FAILED_STATE; - break; - } - } - - if (current_state != statuspb::RUNNING_STATE && - file_source.expected_state == statuspb::TERMINATED_STATE) { - current_state = statuspb::TERMINATED_STATE; - } - - if (current_state == file_source.current_state) { - // No state transition, nothing to do. - continue; - } - - // The following transitions are legal: - // 1. Pending -> Terminated: Probe is stopped before starting. - // 2. Pending -> Running : Probe starts up. - // 3. Running -> Terminated: Probe is stopped. - // 4. Running -> Failed: Probe got dettached because binary died. - // 5. Failed -> Running: Probe started up because binary came back to life. 
- // - // In all cases we basically inform the MDS. - // In the cases where we transition to running, we need to update the schemas. - - Status probe_status = Status::OK(); - LOG(INFO) << absl::Substitute("File source[$0]::$1 has transitioned $2 -> $3", id.str(), - file_source.name, - statuspb::LifeCycleState_Name(file_source.current_state), - statuspb::LifeCycleState_Name(current_state)); - // Check if running now, then update the schema. - if (current_state == statuspb::RUNNING_STATE) { - // We must have just transitioned into running. We try to apply the new schema. - // If it fails we will trigger an error and report that to MDS. - auto publish_pb = s_or_publish.ConsumeValueOrDie(); - auto s = UpdateSchema(publish_pb); - if (!s.ok()) { - current_state = statuspb::FAILED_STATE; - probe_status = s; - } - } else { - probe_status = s_or_publish.status(); - } - - file_source.current_state = current_state; - } - file_source_monitor_timer_->EnableTimer(kUpdateInterval); -} - -Status FileSourceManager::UpdateSchema(const stirling::stirlingpb::Publish& publish_pb) { - LOG(INFO) << "Updating schema for file source"; - auto relation_info_vec = ConvertPublishPBToRelationInfo(publish_pb); - - // TODO(ddelnano): Failure here can lead to an inconsistent schema state. We should - // figure out how to handle this as part of the data model refactor project. 
- for (const auto& relation_info : relation_info_vec) { - LOG(INFO) << absl::Substitute("Adding table: $0", relation_info.name); - table_store_->AddTable( - table_store::HotOnlyTable::Create(relation_info.name, relation_info.relation), - relation_info.name, relation_info.id); - } - return Status::OK(); -} - -FileSourceInfo* FileSourceManager::GetFileSourceInfo(std::string name) { - std::lock_guard lock(mu_); - auto pair = file_source_name_map_.find(name); - if (pair == file_source_name_map_.end()) { - return nullptr; - } - - auto id_pair = file_sources_.find(pair->second); - if (id_pair == file_sources_.end()) { - return nullptr; - } - - return &id_pair->second; -} - -} // namespace agent -} // namespace vizier -} // namespace px diff --git a/src/experimental/standalone_pem/file_source_manager.h b/src/experimental/standalone_pem/file_source_manager.h deleted file mode 100644 index 7e426bc69be..00000000000 --- a/src/experimental/standalone_pem/file_source_manager.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include -#include - -#include - -#include "src/stirling/stirling.h" -#include "src/vizier/services/agent/shared/manager/manager.h" - -namespace px { -namespace vizier { -namespace agent { - -struct FileSourceInfo { - std::string name; - sole::uuid id; - statuspb::LifeCycleState expected_state; - statuspb::LifeCycleState current_state; - std::chrono::time_point last_updated_at; -}; - -class FileSourceManager { - public: - FileSourceManager() = delete; - FileSourceManager(px::event::Dispatcher* dispatcher, stirling::Stirling* stirling, - table_store::TableStore* table_store); - - std::string DebugString() const; - Status HandleRegisterFileSourceRequest(sole::uuid id, std::string file_name); - Status HandleRemoveFileSourceRequest(sole::uuid id, const messages::FileSourceMessage& req); - FileSourceInfo* GetFileSourceInfo(std::string name); - - private: - // The tracepoint Monitor that is responsible for watching and updating the state of - // active tracepoints. - void Monitor(); - Status UpdateSchema(const stirling::stirlingpb::Publish& publish_proto); - - px::event::Dispatcher* dispatcher_; - stirling::Stirling* stirling_; - table_store::TableStore* table_store_; - - event::TimerUPtr file_source_monitor_timer_; - mutable std::mutex mu_; - absl::flat_hash_map file_sources_; - // File source name to UUID. 
- absl::flat_hash_map file_source_name_map_; -}; - -} // namespace agent -} // namespace vizier -} // namespace px diff --git a/src/experimental/standalone_pem/standalone_pem_manager.cc b/src/experimental/standalone_pem/standalone_pem_manager.cc index 696c0449d05..d1257dbdbfd 100644 --- a/src/experimental/standalone_pem/standalone_pem_manager.cc +++ b/src/experimental/standalone_pem/standalone_pem_manager.cc @@ -27,7 +27,6 @@ #include "src/shared/schema/utils.h" #include "src/table_store/table_store.h" #include "src/vizier/funcs/funcs.h" -#include "src/vizier/services/metadata/local/local_metadata_service.h" DEFINE_int32( table_store_data_limit, gflags::Int32FromEnv("PL_TABLE_STORE_DATA_LIMIT_MB", 1024 + 256), @@ -73,8 +72,7 @@ StandalonePEMManager::StandalonePEMManager(sole::uuid agent_id, std::string_view api_(std::make_unique(time_system_.get())), dispatcher_(api_->AllocateDispatcher("manager")), table_store_(std::make_shared()), - metadata_grpc_server_(std::make_unique(table_store_.get())), - func_context_(this, metadata_grpc_server_->StubGenerator(), /* mdtp_stub= */ nullptr, + func_context_(this, /* mds_stub= */ nullptr, /* mdtp_stub= */ nullptr, /* cronscript_stub= */ nullptr, table_store_, [](grpc::ClientContext*) {}), stirling_(px::stirling::Stirling::Create(px::stirling::CreateSourceRegistryFromFlag())), results_sink_server_(std::make_unique()) { @@ -104,16 +102,11 @@ StandalonePEMManager::StandalonePEMManager(sole::uuid agent_id, std::string_view std::move(clients_config), std::move(server_config)) .ConsumeValueOrDie(); - const std::string proc_pid_path = std::string("/proc/") + std::to_string(info_.pid); - PX_ASSIGN_OR_RETURN(auto start_time, system::GetPIDStartTimeTicks(proc_pid_path)); - mds_manager_ = std::make_unique( - info_.hostname, info_.asid, info_.pid, start_time, info_.agent_id, time_system_.get()); + info_.hostname, info_.asid, info_.pid, info_.agent_id, time_system_.get()); tracepoint_manager_ = std::make_unique(dispatcher_.get(), 
stirling_.get(), table_store_.get()); - file_source_manager_ = - std::make_unique(dispatcher_.get(), stirling_.get(), table_store_.get()); // Force Metadata Update. ECHECK_OK(mds_manager_->PerformMetadataStateUpdate()); } @@ -153,9 +146,9 @@ Status StandalonePEMManager::Init() { stirling_->RegisterAgentMetadataCallback( std::bind(&px::md::AgentMetadataStateManager::CurrentAgentMetadataState, mds_manager_.get())); - vizier_grpc_server_ = std::make_unique( - port_, carnot_.get(), results_sink_server_.get(), carnot_->GetEngineState(), - tracepoint_manager_.get(), file_source_manager_.get()); + vizier_grpc_server_ = + std::make_unique(port_, carnot_.get(), results_sink_server_.get(), + carnot_->GetEngineState(), tracepoint_manager_.get()); return Status::OK(); } @@ -218,20 +211,20 @@ Status StandalonePEMManager::InitSchemas() { // Special case to set the max size of the http_events table differently from the other // tables. For now, the min cold batch size is set to 256kB to be consistent with previous // behaviour. 
- table_ptr = std::make_shared( - relation_info.name, relation_info.relation, http_table_size, 256 * 1024); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + http_table_size, 256 * 1024); } else if (relation_info.name == "stirling_error") { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, stirling_error_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + stirling_error_table_size); } else if (relation_info.name == "probe_status") { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, probe_status_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + probe_status_table_size); } else if (relation_info.name == "proc_exit_events") { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, proc_exit_events_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + proc_exit_events_table_size); } else { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, other_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + other_table_size); } table_store_->AddTable(std::move(table_ptr), relation_info.name, relation_info.id); diff --git a/src/experimental/standalone_pem/standalone_pem_manager.h b/src/experimental/standalone_pem/standalone_pem_manager.h index b89b47da97b..9d658b1306a 100644 --- a/src/experimental/standalone_pem/standalone_pem_manager.h +++ b/src/experimental/standalone_pem/standalone_pem_manager.h @@ -23,7 +23,6 @@ #include "src/carnot/carnot.h" #include "src/common/event/event.h" -#include "src/experimental/standalone_pem/file_source_manager.h" #include "src/experimental/standalone_pem/sink_server.h" #include "src/experimental/standalone_pem/tracepoint_manager.h" #include "src/experimental/standalone_pem/vizier_server.h" @@ -32,7 +31,6 @@ #include "src/vizier/funcs/context/vizier_context.h" #include 
"src/vizier/services/agent/shared/base/base_manager.h" #include "src/vizier/services/agent/shared/base/info.h" -#include "src/vizier/services/metadata/local/local_metadata_service.h" namespace px { namespace vizier { @@ -74,9 +72,6 @@ class StandalonePEMManager : public BaseManager { std::shared_ptr table_store_; - // Metadata gRPC server must be initialized before func_context_ - std::unique_ptr metadata_grpc_server_; - // Factory context for vizier functions. funcs::VizierFuncFactoryContext func_context_; @@ -92,9 +87,6 @@ class StandalonePEMManager : public BaseManager { // Tracepoints std::unique_ptr tracepoint_manager_; - - // FileSource manager - std::unique_ptr file_source_manager_; }; } // namespace agent diff --git a/src/experimental/standalone_pem/tracepoint_manager.cc b/src/experimental/standalone_pem/tracepoint_manager.cc index 240050d74b9..f05772f0a04 100644 --- a/src/experimental/standalone_pem/tracepoint_manager.cc +++ b/src/experimental/standalone_pem/tracepoint_manager.cc @@ -178,9 +178,8 @@ Status TracepointManager::UpdateSchema(const stirling::stirlingpb::Publish& publ // TODO(zasgar): Failure here can lead to an inconsistent schema state. We should // // figure out how to handle this as part of the data model refactor project. 
for (const auto& relation_info : relation_info_vec) { - table_store_->AddTable( - table_store::HotColdTable::Create(relation_info.name, relation_info.relation), - relation_info.name, relation_info.id); + table_store_->AddTable(table_store::Table::Create(relation_info.name, relation_info.relation), + relation_info.name, relation_info.id); } return Status::OK(); } diff --git a/src/experimental/standalone_pem/vizier_server.h b/src/experimental/standalone_pem/vizier_server.h index 1968e0fe96d..44856ff585a 100644 --- a/src/experimental/standalone_pem/vizier_server.h +++ b/src/experimental/standalone_pem/vizier_server.h @@ -50,13 +50,11 @@ class VizierServer final : public api::vizierpb::VizierService::Service { public: VizierServer() = delete; VizierServer(carnot::Carnot* carnot, px::vizier::agent::StandaloneGRPCResultSinkServer* svr, - px::carnot::EngineState* engine_state, TracepointManager* tp_manager, - FileSourceManager* file_source_manager) { + px::carnot::EngineState* engine_state, TracepointManager* tp_manager) { carnot_ = carnot; sink_server_ = svr; engine_state_ = engine_state; tp_manager_ = tp_manager; - file_source_manager_ = file_source_manager; } ::grpc::Status ExecuteScript( @@ -82,7 +80,6 @@ class VizierServer final : public api::vizierpb::VizierService::Service { auto mutations = mutations_or_s.ConsumeValueOrDie(); auto deployments = mutations->Deployments(); - auto file_source_deployments = mutations->FileSourceDeployments(); bool tracepoints_running = true; auto ntp_info = TracepointInfo{}; @@ -120,35 +117,6 @@ class VizierServer final : public api::vizierpb::VizierService::Service { response->Write(mutation_resp); return ::grpc::Status::CANCELLED; } - - auto file_sources_running = true; - auto nfile_source_info = FileSourceInfo{}; - for (size_t i = 0; i < file_source_deployments.size(); i++) { - auto file_source = file_source_deployments[i]; - auto file_source_info = file_source_manager_->GetFileSourceInfo(file_source.glob_pattern()); - if 
(file_source_info == nullptr) { - auto s = file_source_manager_->HandleRegisterFileSourceRequest( - sole::uuid4(), file_source.glob_pattern()); - if (!s.ok()) { - return ::grpc::Status(grpc::StatusCode::INTERNAL, "Failed to register file source"); - } - nfile_source_info.name = file_source.glob_pattern(); - nfile_source_info.current_state = statuspb::PENDING_STATE; - file_source_info = &nfile_source_info; - } - if (file_source_info->current_state != statuspb::RUNNING_STATE) { - file_sources_running = false; - } - } - if (!file_sources_running) { - auto m_info = mutation_resp.mutable_mutation_info(); - m_info->mutable_status()->set_code(grpc::StatusCode::UNAVAILABLE); - response->Write(mutation_resp); - return ::grpc::Status::CANCELLED; - } - /* auto m_info = mutation_resp.mutable_mutation_info(); */ - /* m_info->mutable_status()->set_code(0); */ - /* response->Write(mutation_resp); */ } LOG(INFO) << "Compiling and running query"; // Send schema before sending query results. @@ -230,7 +198,6 @@ class VizierServer final : public api::vizierpb::VizierService::Service { px::vizier::agent::StandaloneGRPCResultSinkServer* sink_server_; px::carnot::EngineState* engine_state_; TracepointManager* tp_manager_; - FileSourceManager* file_source_manager_; }; class VizierGRPCServer { @@ -238,10 +205,8 @@ class VizierGRPCServer { VizierGRPCServer() = delete; VizierGRPCServer(int port, carnot::Carnot* carnot, px::vizier::agent::StandaloneGRPCResultSinkServer* svr, - carnot::EngineState* engine_state, TracepointManager* tp_manager, - FileSourceManager* file_source_manager) - : vizier_server_(std::make_unique(carnot, svr, engine_state, tp_manager, - file_source_manager)) { + carnot::EngineState* engine_state, TracepointManager* tp_manager) + : vizier_server_(std::make_unique(carnot, svr, engine_state, tp_manager)) { grpc::ServerBuilder builder; std::string uri = absl::Substitute("0.0.0.0:$0", port); diff --git a/src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml 
b/src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml deleted file mode 100644 index 178d1cc9659..00000000000 --- a/src/pxl_scripts/px/pipeline_flow_graph/manifest.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -short: Overview of Pipeline throughput -long: > - This view displays a summary of the throughput of the pipeline. diff --git a/src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl b/src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl deleted file mode 100644 index f8dc4466a4b..00000000000 --- a/src/pxl_scripts/px/pipeline_flow_graph/pipeline_flow_graph.pxl +++ /dev/null @@ -1,82 +0,0 @@ -import px - -kelvin_dest = "unknown" -bpf_source_op_start = 10000 -memory_source_op = 1000 # This corresponds to a file source -file_source_op = 2 -# TODO(ddelnano): This currently can't tell the difference -# between an internal and external grpc sink. -grpc_sink_op = 9100 -otel_export_op = 9200 - -def final_dest_to_str(dest): - return px.select(dest == otel_export_op, "Otel Export", kelvin_dest) - -def get_memory_source_sink_results(df, min_asid): - file_sources = px.GetFileSourceStatus() - file_sources.stream_id = file_sources.file_source_id - - tracepoint_sources = px.GetTracepointStatus() - tracepoint_sources.stream_id = tracepoint_sources.tracepoint_id_str - - df = df[df.destination > bpf_source_op_start or df.destination == memory_source_op] - file_sources_df = df.merge(file_sources, how='left', left_on='stream_id', right_on='file_source_id') - file_sources_df = file_sources_df['time_', 'upid', 'pod', 'name', 'bytes_transferred', 'destination', 'stream_id_x', 'stream_id_y', 'match'] - tracepoint_sources_df = df.merge(tracepoint_sources, how='left', left_on='stream_id', right_on='tracepoint_id_str') - tracepoint_sources_df = tracepoint_sources_df['time_', 'upid', 'pod', 'name', 'bytes_transferred', 'destination', 'stream_id_x', 'stream_id_y', 'match'] - - df = file_sources_df.append(tracepoint_sources_df) - - # stream_id_y is the column from the 
file_sources UDTF after the merge - df.is_bpf_source = df.stream_id_y == "" - df = df.merge(min_asid, how='left', left_on='match', right_on='match') - - df.to_entity = df.pod - df.from_entity = px.select(df.is_bpf_source, px.pipeline_dest_to_name(df.destination), df.name) + " " + px.itoa(px.upid_to_asid(df.upid) - df.min_asid) - df = df['time_', 'from_entity', 'to_entity', 'bytes_transferred'] - df = df.groupby(['from_entity', 'to_entity']).agg( - total_bytes=('bytes_transferred', px.sum), - ) - - return df - -def pipeline_flow_graph(start_time: str): - agents = px.GetAgentStatus() - kelvin = agents[px.contains(agents.hostname, "kelvin")] - min_asid = agents.agg(min_asid=('asid', px.min)) - min_asid.match = True - - df = px.DataFrame('sink_results', start_time=start_time) - df.pod = df.ctx['pod'] - df.match = True - - mem_source_sink_results = get_memory_source_sink_results(df, min_asid) - - df = df[df.destination == otel_export_op or df.destination == grpc_sink_op] - df.final_dest = final_dest_to_str(df.destination) - - # Use a dummy column that matches in both data frames - # so the Kelvin hostname join works as expected - kelvin.match = True - - # For external GRPC sinks, df.pod will be empty and kelvin_dest will be "unknown" - df.is_dest_kelvin = px.select(df.final_dest == kelvin_dest and df.pod != "", True, False) - df.final_dest = px.select(not df.is_dest_kelvin and df.final_dest == kelvin_dest, "px.display", df.final_dest) - df = df.merge(kelvin, how='left', left_on='match', right_on='match') - # Remove the port from the ip_address column from the GetAgentStatus UDTF - df.ip_address = px.pluck_array(px.split(df.ip_address, ":"), 0) - df.kelvin_pod = px.pod_id_to_pod_name(px.ip_to_pod_id(df.ip_address)) - - df.from_entity = px.select(df.is_dest_kelvin, df.pod, df.kelvin_pod) - df.to_entity = px.select(df.is_dest_kelvin, df.kelvin_pod, df.final_dest) - - df = df.groupby(['from_entity', 'to_entity']).agg( - total_bytes=('bytes_transferred', px.sum), - ) - - df 
= df.append(mem_source_sink_results) - df = df[px.substring(df.from_entity, 0, 7) != "unknown"] - df.total_time = px.abs(px.parse_duration(start_time)) / px.pow(10, 9) - df.bytes_throughput = df.total_bytes / df.total_time - return df - diff --git a/src/pxl_scripts/px/pipeline_flow_graph/vis.json b/src/pxl_scripts/px/pipeline_flow_graph/vis.json deleted file mode 100644 index aba41d05c23..00000000000 --- a/src/pxl_scripts/px/pipeline_flow_graph/vis.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "variables": [ - { - "name": "start_time", - "type": "PX_STRING", - "description": "The start time of the window in time units before now.", - "defaultValue": "-5m" - } - ], - "globalFuncs": [ - { - "outputName": "pipeline_flow", - "func": { - "name": "pipeline_flow_graph", - "args": [ - { - "name": "start_time", - "variable": "start_time" - } - ] - } - } - ], - "widgets": [ - { - "name": "Pipeline Flow Graph", - "position": { - "x": 0, - "y": 0, - "w": 12, - "h": 4 - }, - "globalFuncOutputName": "pipeline_flow", - "displaySpec": { - "@type": "types.px.dev/px.vispb.Graph", - "adjacencyList": { - "fromColumn": "from_entity", - "toColumn": "to_entity" - }, - "edgeWeightColumn": "bytes_throughput", - "edgeHoverInfo": [ - "bytes_throughput" - ], - "enableDefaultHierarchy": true, - "edgeLength": 500 - } - } - ] -} diff --git a/src/shared/metadata/metadata_state.cc b/src/shared/metadata/metadata_state.cc index e09f7fa7301..098d95179c5 100644 --- a/src/shared/metadata/metadata_state.cc +++ b/src/shared/metadata/metadata_state.cc @@ -569,7 +569,7 @@ Status K8sMetadataState::CleanupExpiredMetadata(int64_t now, int64_t retention_t std::shared_ptr AgentMetadataState::CloneToShared() const { auto state = - std::make_shared(hostname_, asid_, pid_, start_time_, agent_id_, pod_name_, vizier_id_, + std::make_shared(hostname_, asid_, pid_, agent_id_, pod_name_, vizier_id_, vizier_name_, vizier_namespace_, time_system_); state->last_update_ts_ns_ = last_update_ts_ns_; state->epoch_id_ = epoch_id_; 
diff --git a/src/shared/metadata/metadata_state.h b/src/shared/metadata/metadata_state.h index 95957de23dc..e2fdc9e6c86 100644 --- a/src/shared/metadata/metadata_state.h +++ b/src/shared/metadata/metadata_state.h @@ -341,14 +341,13 @@ class K8sMetadataState : NotCopyable { class AgentMetadataState : NotCopyable { public: AgentMetadataState() = delete; - AgentMetadataState(std::string_view hostname, uint32_t asid, uint32_t pid, uint64_t start_time, AgentID agent_id, + AgentMetadataState(std::string_view hostname, uint32_t asid, uint32_t pid, AgentID agent_id, std::string_view pod_name, sole::uuid vizier_id, std::string_view vizier_name, std::string_view vizier_namespace, event::TimeSystem* time_system) : hostname_(std::string(hostname)), pod_name_(std::string(pod_name)), asid_(asid), pid_(pid), - start_time_(start_time), agent_id_(agent_id), vizier_id_(vizier_id), vizier_name_(std::string(vizier_name)), @@ -361,7 +360,6 @@ class AgentMetadataState : NotCopyable { uint32_t pid() const { return pid_; } const std::string& pod_name() const { return pod_name_; } const sole::uuid& agent_id() const { return agent_id_; } - const md::UPID agent_upid() const { return md::UPID(asid_, pid_, start_time_); } const sole::uuid& vizier_id() const { return vizier_id_; } const std::string& vizier_name() const { return vizier_name_; } @@ -435,7 +433,6 @@ class AgentMetadataState : NotCopyable { std::string pod_name_; uint32_t asid_; uint32_t pid_; - uint64_t start_time_; AgentID agent_id_; sole::uuid vizier_id_; diff --git a/src/shared/metadata/standalone_state_manager.h b/src/shared/metadata/standalone_state_manager.h index a353f470682..82cb16030ed 100644 --- a/src/shared/metadata/standalone_state_manager.h +++ b/src/shared/metadata/standalone_state_manager.h @@ -35,9 +35,9 @@ namespace md { */ class StandaloneAgentMetadataStateManager : public AgentMetadataStateManager { public: - StandaloneAgentMetadataStateManager(std::string_view hostname, uint32_t asid, uint32_t pid, uint64_t 
start_time, + StandaloneAgentMetadataStateManager(std::string_view hostname, uint32_t asid, uint32_t pid, sole::uuid agent_id, event::TimeSystem* time_system) { - agent_metadata_state_ = std::make_shared(hostname, asid, pid, start_time, agent_id, + agent_metadata_state_ = std::make_shared(hostname, asid, pid, agent_id, /*pod_name=*/"", sole::uuid(), "standalone_pem", "", time_system); } diff --git a/src/shared/metadata/state_manager.h b/src/shared/metadata/state_manager.h index 68f73f5fa37..67dec26b962 100644 --- a/src/shared/metadata/state_manager.h +++ b/src/shared/metadata/state_manager.h @@ -119,7 +119,7 @@ class AgentMetadataStateManagerImpl : public AgentMetadataStateManager { public: virtual ~AgentMetadataStateManagerImpl() = default; - AgentMetadataStateManagerImpl(std::string_view hostname, uint32_t asid, uint32_t pid, uint64_t start_time, + AgentMetadataStateManagerImpl(std::string_view hostname, uint32_t asid, uint32_t pid, std::string pod_name, sole::uuid agent_id, bool collects_data, const px::system::Config& config, AgentMetadataFilter* metadata_filter, sole::uuid vizier_id, @@ -128,7 +128,7 @@ class AgentMetadataStateManagerImpl : public AgentMetadataStateManager { : pod_name_(pod_name), collects_data_(collects_data), metadata_filter_(metadata_filter) { md_reader_ = std::make_unique(config); agent_metadata_state_ = - std::make_shared(hostname, asid, pid, start_time, agent_id, pod_name, vizier_id, + std::make_shared(hostname, asid, pid, agent_id, pod_name, vizier_id, vizier_name, vizier_namespace, time_system); } diff --git a/src/shared/schema/utils.cc b/src/shared/schema/utils.cc index fde9bc093b2..c17e5fbffb3 100644 --- a/src/shared/schema/utils.cc +++ b/src/shared/schema/utils.cc @@ -35,19 +35,13 @@ table_store::schema::Relation InfoClassProtoToRelation( RelationInfo ConvertInfoClassPBToRelationInfo( const stirling::stirlingpb::InfoClass& info_class_pb) { - auto schema = info_class_pb.schema(); - std::optional mutation_id; - if 
(schema.mutation_id() != "") { - mutation_id = schema.mutation_id(); - } if (info_class_pb.schema().tabletized()) { - return RelationInfo(schema.name(), info_class_pb.id(), schema.desc(), - schema.tabletization_key(), mutation_id, + return RelationInfo(info_class_pb.schema().name(), info_class_pb.id(), + info_class_pb.schema().desc(), info_class_pb.schema().tabletization_key(), InfoClassProtoToRelation(info_class_pb)); } return RelationInfo(info_class_pb.schema().name(), info_class_pb.id(), - info_class_pb.schema().desc(), mutation_id, - InfoClassProtoToRelation(info_class_pb)); + info_class_pb.schema().desc(), InfoClassProtoToRelation(info_class_pb)); } } // namespace diff --git a/src/shared/schema/utils.h b/src/shared/schema/utils.h index 0b586f8d34c..991edda5340 100644 --- a/src/shared/schema/utils.h +++ b/src/shared/schema/utils.h @@ -32,22 +32,20 @@ namespace px { struct RelationInfo { RelationInfo() = default; RelationInfo(std::string name, uint64_t id, std::string desc, - std::optional mutation_id, table_store::schema::Relation relation) + table_store::schema::Relation relation) : name(std::move(name)), id(id), desc(std::move(desc)), tabletized(false), - mutation_id(mutation_id), relation(std::move(relation)) {} RelationInfo(std::string name, uint64_t id, std::string desc, uint64_t tabletization_key_idx, - std::optional mutation_id, table_store::schema::Relation relation) + table_store::schema::Relation relation) : name(std::move(name)), id(id), desc(std::move(desc)), tabletized(true), tabletization_key_idx(tabletization_key_idx), - mutation_id(mutation_id), relation(std::move(relation)) {} std::string name; @@ -55,7 +53,6 @@ struct RelationInfo { std::string desc; bool tabletized; uint64_t tabletization_key_idx; - std::optional mutation_id; table_store::schema::Relation relation; }; diff --git a/src/stirling/BUILD.bazel b/src/stirling/BUILD.bazel index 2ebf6af5286..281236e5dea 100644 --- a/src/stirling/BUILD.bazel +++ b/src/stirling/BUILD.bazel @@ -49,7 
+49,6 @@ pl_cc_library( "//src/stirling/proto:stirling_pl_cc_proto", "//src/stirling/source_connectors/dynamic_bpftrace:cc_library", "//src/stirling/source_connectors/dynamic_tracer:cc_library", - "//src/stirling/source_connectors/file_source:cc_library", "//src/stirling/source_connectors/jvm_stats:cc_library", "//src/stirling/source_connectors/network_stats:cc_library", "//src/stirling/source_connectors/perf_profiler:cc_library", diff --git a/src/stirling/core/BUILD.bazel b/src/stirling/core/BUILD.bazel index 587f46b427c..ab795229aad 100644 --- a/src/stirling/core/BUILD.bazel +++ b/src/stirling/core/BUILD.bazel @@ -32,7 +32,6 @@ pl_cc_library( "//src/stirling/source_connectors/cpu_stat_bpftrace:__pkg__", "//src/stirling/source_connectors/dynamic_bpftrace:__pkg__", "//src/stirling/source_connectors/dynamic_tracer:__pkg__", - "//src/stirling/source_connectors/file_source:__pkg__", "//src/stirling/source_connectors/jvm_stats:__pkg__", "//src/stirling/source_connectors/network_stats:__pkg__", "//src/stirling/source_connectors/perf_profiler:__pkg__", diff --git a/src/stirling/core/info_class_manager.cc b/src/stirling/core/info_class_manager.cc index 19cb1fa91f7..82483a8e180 100644 --- a/src/stirling/core/info_class_manager.cc +++ b/src/stirling/core/info_class_manager.cc @@ -32,12 +32,8 @@ void InfoClassManager::InitContext(ConnectorContext* ctx) { source_->InitContext stirlingpb::InfoClass InfoClassManager::ToProto() const { stirlingpb::InfoClass info_class_proto; - auto schema = info_class_proto.mutable_schema(); - schema->CopyFrom(schema_.ToProto()); + info_class_proto.mutable_schema()->CopyFrom(schema_.ToProto()); info_class_proto.set_id(id()); - if (mutation_id_.has_value()) { - schema->set_mutation_id(mutation_id_.value()); - } return info_class_proto; } diff --git a/src/stirling/core/info_class_manager.h b/src/stirling/core/info_class_manager.h index 98a5cf05f9f..dc929a871d7 100644 --- a/src/stirling/core/info_class_manager.h +++ 
b/src/stirling/core/info_class_manager.h @@ -73,13 +73,6 @@ class InfoClassManager final : public NotCopyable { */ void SetSourceConnector(SourceConnector* source) { source_ = source; } - /** - * @brief Mutation ID connector connected to this Info Class if one exists - * - * @param source Pointer to source connector instance. - */ - void SetMutationId(std::optional mutation_id) { mutation_id_ = mutation_id; } - /** * Get the schema of the InfoClass. * @@ -135,9 +128,6 @@ class InfoClassManager final : public NotCopyable { // Pointer to the data table where the data is stored. std::unique_ptr data_table_; - - // The mutation ID of the info class manager if one exists. - std::optional mutation_id_; }; using InfoClassManagerVec = std::vector>; diff --git a/src/stirling/core/info_class_manager_test.cc b/src/stirling/core/info_class_manager_test.cc index f8440c9b856..c67f78e24fe 100644 --- a/src/stirling/core/info_class_manager_test.cc +++ b/src/stirling/core/info_class_manager_test.cc @@ -27,7 +27,6 @@ namespace stirling { using types::DataType; using types::PatternType; -// TODO(ddelnano): Add test regarding ToProto and SetMutationId. TEST(InfoClassInfoSchemaTest, infoclass_mgr_proto_getters_test) { InfoClassManager info_class_mgr(SeqGenConnector::kSeq0Table); auto source = SeqGenConnector::Create("sequences"); diff --git a/src/stirling/core/source_connector.cc b/src/stirling/core/source_connector.cc index ae2373c8fbb..54fa5137cc3 100644 --- a/src/stirling/core/source_connector.cc +++ b/src/stirling/core/source_connector.cc @@ -20,7 +20,7 @@ #include #include -#include +#include #include "src/stirling/core/source_connector.h" @@ -61,7 +61,7 @@ void SourceConnector::PushData(DataPushCallback agent_callback) { Status s = agent_callback( data_table->id(), record_batch.tablet_id, std::make_unique(std::move(record_batch.records))); - LOG_IF(ERROR, !s.ok()) << absl::Substitute("Failed to push data. 
Message = $0", s.msg()); + LOG_IF(DFATAL, !s.ok()) << absl::Substitute("Failed to push data. Message = $0", s.msg()); } } } diff --git a/src/stirling/proto/stirling.proto b/src/stirling/proto/stirling.proto index e0d1b374c23..ab36ce6297c 100644 --- a/src/stirling/proto/stirling.proto +++ b/src/stirling/proto/stirling.proto @@ -48,7 +48,6 @@ message TableSchema { repeated Element elements = 2; bool tabletized = 3; uint64 tabletization_key = 4; - string mutation_id = 6; } // InfoClass stores a set of Elements that share common timestamps (i.e., they are diff --git a/src/stirling/source_connectors/file_source/BUILD.bazel b/src/stirling/source_connectors/file_source/BUILD.bazel deleted file mode 100644 index 11dbfdc1630..00000000000 --- a/src/stirling/source_connectors/file_source/BUILD.bazel +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2018- The Pixie Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 - -load("//bazel:pl_build_system.bzl", "pl_cc_bpf_test", "pl_cc_library", "pl_cc_test") - -package(default_visibility = ["//src/stirling:__subpackages__"]) - -pl_cc_library( - name = "cc_library", - srcs = glob( - ["*.cc"], - exclude = [ - "**/*_test.cc", - ], - ), - hdrs = glob(["*.h"]), - deps = [ - "//src/stirling/core:cc_library", - "//src/stirling/utils:cc_library", - "@com_github_tencent_rapidjson//:rapidjson", - ], -) - -pl_cc_test( - name = "file_source_connector_test", - srcs = ["file_source_connector_test.cc"], - data = [ - "testdata/test.json", - "testdata/unsupported.json", - ], - deps = [ - ":cc_library", - ], -) - -pl_cc_test( - name = "stirling_fs_test", - srcs = ["stirling_fs_test.cc"], - data = [ - "testdata/test.json", - "testdata/unsupported.json", - ], - deps = [ - ":cc_library", - "//src/stirling:cc_library", - ], -) diff --git a/src/stirling/source_connectors/file_source/file_source_connector.cc b/src/stirling/source_connectors/file_source/file_source_connector.cc deleted file mode 100644 index 112c472ce05..00000000000 --- a/src/stirling/source_connectors/file_source/file_source_connector.cc +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include "src/stirling/source_connectors/file_source/file_source_connector.h" - -#include -#include - -#include -#include - -using px::StatusOr; - -constexpr size_t kMaxStringBytes = std::numeric_limits::max(); - -namespace px { -namespace stirling { - -using px::utils::RapidJSONTypeToString; - -StatusOr DataElementsFromJSON(std::ifstream& f_stream) { - std::string line; - std::getline(f_stream, line); - - if (f_stream.eof()) { - return error::Internal("Failed to read file, hit EOF"); - } - - rapidjson::Document d; - rapidjson::ParseResult ok = d.Parse(line.c_str()); - if (!ok) { - return error::Internal("Failed to parse JSON: $0 $1", line, - rapidjson::GetParseError_En(ok.Code())); - } - auto elements = d.MemberCount() + 2; // Add additional columns for time_ - BackedDataElements data_elements(elements); - - data_elements.emplace_back("time_", "", types::DataType::TIME64NS); - // TODO(ddelnano): Make it configurable to request a UUID in PxL rather than creating it by default. 
- data_elements.emplace_back("uuid", "", types::DataType::UINT128); - for (rapidjson::Value::ConstMemberIterator itr = d.MemberBegin(); itr != d.MemberEnd(); ++itr) { - auto name = itr->name.GetString(); - const auto& value = itr->value; - types::DataType col_type; - - if (value.IsInt()) { - col_type = types::DataType::INT64; - } else if (value.IsDouble()) { - col_type = types::DataType::FLOAT64; - } else if (value.IsString()) { - col_type = types::DataType::STRING; - } else if (value.IsBool()) { - col_type = types::DataType::BOOLEAN; - } else if (value.IsObject()) { - col_type = types::DataType::STRING; - } else if (value.IsArray()) { - col_type = types::DataType::STRING; - } else { - return error::Internal("Unable to parse JSON key '$0': unsupported type: $1", name, - RapidJSONTypeToString(itr->value.GetType())); - } - data_elements.emplace_back(name, "", col_type); - } - - return data_elements; -} - -StatusOr DataElementsFromCSV(std::ifstream& file_name) { - PX_UNUSED(file_name); - return BackedDataElements(0); -} - -StatusOr DataElementsForUnstructuredFile() { - BackedDataElements data_elements(3); - data_elements.emplace_back("time_", "", types::DataType::TIME64NS); - data_elements.emplace_back("uuid", "", types::DataType::UINT128); - data_elements.emplace_back("raw_line", "", types::DataType::STRING); - return data_elements; -} - -namespace { - -StatusOr> DataElementsFromFile( - const std::filesystem::path& file_name, bool allow_unstructured = true) { - auto f = std::ifstream(file_name.string()); - if (!f.is_open()) { - return error::Internal("Failed to open file: $0 with error=$1", file_name.string(), - strerror(errno)); - } - - // get the file extension of the file - auto extension = file_name.extension().string(); - BackedDataElements data_elements; - if (extension == ".csv") { - PX_ASSIGN_OR_RETURN(data_elements, DataElementsFromCSV(f)); - } else if (extension == ".json") { - PX_ASSIGN_OR_RETURN(data_elements, DataElementsFromJSON(f)); - } else { - if 
(allow_unstructured) { - LOG(WARNING) << absl::Substitute("Unsupported file type: $0, treating each line as a single column", extension); - PX_ASSIGN_OR_RETURN(data_elements, DataElementsForUnstructuredFile()); - } else { - // TODO(ddelnano): If file extension is blank this isn't a helpful error message. - return error::Internal("Unsupported file type: $0", extension); - } - } - - f.seekg(0, std::ios::beg); - return std::make_pair(std::move(data_elements), std::move(f)); -} - -} // namespace - -StatusOr> FileSourceConnector::Create( - std::string_view source_name, const std::filesystem::path file_name) { - auto host_path = px::system::Config::GetInstance().ToHostPath(file_name); - PX_ASSIGN_OR_RETURN(auto data_elements_and_file, DataElementsFromFile(host_path)); - auto& [data_elements, file] = data_elements_and_file; - - // Get just the filename and extension - auto name = host_path.filename().string(); - std::unique_ptr table_schema = - DynamicDataTableSchema::Create(name, "", std::move(data_elements)); - return std::unique_ptr(new FileSourceConnector( - source_name, std::move(host_path), std::move(file), std::move(table_schema))); -} - -FileSourceConnector::FileSourceConnector(std::string_view source_name, - const std::filesystem::path& file_name, std::ifstream file, - std::unique_ptr table_schema) - : SourceConnector(source_name, ArrayView(&table_schema->Get(), 1)), - name_(source_name), - file_name_(file_name), - file_(std::move(file)), - table_schema_(std::move(table_schema)), - transfer_specs_({ - {".json", {&FileSourceConnector::TransferDataFromJSON}}, - {".csv", {&FileSourceConnector::TransferDataFromCSV}}, - {"", {&FileSourceConnector::TransferDataFromUnstructuredFile}}, - {".log", {&FileSourceConnector::TransferDataFromUnstructuredFile}}, - }) {} - -Status FileSourceConnector::InitImpl() { - sampling_freq_mgr_.set_period(kSamplingPeriod); - push_freq_mgr_.set_period(kPushPeriod); - return Status::OK(); -} - -Status FileSourceConnector::StopImpl() { - 
file_.close(); - return Status::OK(); -} - -constexpr int kMaxLines = 1000; - -void FileSourceConnector::TransferDataFromJSON(DataTable::DynamicRecordBuilder* /*r*/, - uint64_t nanos, const std::string& line) { - rapidjson::Document d; - rapidjson::ParseResult ok = d.Parse(line.c_str()); - if (!ok) { - LOG(ERROR) << absl::Substitute("Failed to parse JSON: $0 $1", line, - rapidjson::GetParseError_En(ok.Code())); - return; - } - DataTable::DynamicRecordBuilder r(data_tables_[0]); - const auto& columns = table_schema_->Get().elements(); - - for (size_t col = 0; col < columns.size(); col++) { - const auto& column = columns[col]; - std::string key(column.name()); - // time_ is inserted by stirling and not within the polled file - if (key == "time_") { - r.Append(col, types::Time64NSValue(nanos)); - continue; - } else if (key == "uuid") { - sole::uuid u = sole::uuid4(); - r.Append(col, types::UInt128Value(u.ab, u.cd)); - continue; - } - const auto& value = d[key.c_str()]; - switch (column.type()) { - case types::DataType::INT64: - r.Append(col, types::Int64Value(value.GetInt())); - break; - case types::DataType::FLOAT64: - r.Append(col, types::Float64Value(value.GetDouble())); - break; - case types::DataType::STRING: - if (value.IsArray() || value.IsObject()) { - rapidjson::StringBuffer buffer; - rapidjson::Writer writer(buffer); - value.Accept(writer); - r.Append(col, types::StringValue(buffer.GetString()), kMaxStringBytes); - } else { - r.Append(col, types::StringValue(value.GetString()), kMaxStringBytes); - } - break; - case types::DataType::BOOLEAN: - r.Append(col, types::BoolValue(value.GetBool())); - break; - default: - LOG(FATAL) << absl::Substitute( - "Failed to insert field into DataTable: unsupported type '$0'", - types::DataType_Name(column.type())); - } - } - return; -} - -void FileSourceConnector::TransferDataFromUnstructuredFile(DataTable::DynamicRecordBuilder* /*r*/, - uint64_t nanos, const std::string& line) { - DataTable::DynamicRecordBuilder 
r(data_tables_[0]); - r.Append(0, types::Time64NSValue(nanos)); - sole::uuid u = sole::uuid4(); - r.Append(1, types::UInt128Value(u.ab, u.cd)); - r.Append(2, types::StringValue(line), kMaxStringBytes); - return; -} - -void FileSourceConnector::TransferDataFromCSV(DataTable::DynamicRecordBuilder* r, uint64_t nanos, - const std::string& line) { - PX_UNUSED(r); - PX_UNUSED(nanos); - PX_UNUSED(line); - return; -} - -void FileSourceConnector::TransferDataImpl(ConnectorContext* /* ctx */) { - DCHECK_EQ(data_tables_.size(), 1U) << "Only one table is allowed per FileSourceConnector."; - int i = 0; - auto extension = file_name_.extension().string(); - auto transfer_fn = transfer_specs_.at(extension).transfer_fn; - - auto now = std::chrono::system_clock::now(); - auto duration = now.time_since_epoch(); - uint64_t nanos = std::chrono::duration_cast(duration).count(); - auto before_pos = file_.tellg(); - while (i < kMaxLines) { - std::string line; - std::getline(file_, line); - - if (file_.eof() || line.empty()) { - file_.clear(); - auto after_pos = file_.tellg(); - if (after_pos == last_pos_) { - LOG_EVERY_N(INFO, 100) << absl::Substitute("Reached EOF for file=$0 eof count=$1 pos=", - file_name_.string(), eof_count_) - << after_pos; - eof_count_++; - - // TODO(ddlenano): Using a file's inode is a better way to detect file rotation. For now, - // this will suffice. 
- std::ifstream s(file_name_, std::ios::ate | std::ios::binary); - if (s.tellg() < after_pos) { - LOG(INFO) << "Detected file rotation, resetting file position"; - file_.close(); - file_.open(file_name_, std::ios::in); - } - } - break; - } - - transfer_fn(*this, nullptr, nanos, line); - i++; - } - auto after_pos = file_.tellg(); - last_pos_ = after_pos; - monitor_.AppendStreamStatusRecord(file_name_, after_pos - before_pos, ""); -} - -} // namespace stirling -} // namespace px diff --git a/src/stirling/source_connectors/file_source/file_source_connector.h b/src/stirling/source_connectors/file_source/file_source_connector.h deleted file mode 100644 index 1525327a652..00000000000 --- a/src/stirling/source_connectors/file_source/file_source_connector.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include -#include -#include -#include - -#include "src/stirling/core/source_connector.h" -#include "src/stirling/utils/monitor.h" - -namespace px { -namespace stirling { - -class FileSourceConnector : public SourceConnector { - using pos_type = std::ifstream::pos_type; - - public: - static constexpr auto kSamplingPeriod = std::chrono::milliseconds{100}; - // Set this high enough to avoid the following error: - // F20250129 00:05:30.980778 2527479 source_connector.cc:64] Failed to push data. 
Message = - // Table_id 1 doesn't exist. - // - // This occurs when the Stirling data table has data but the table store hasn't received its - // schema yet. I'm not sure why the dynamic tracer doesn't experience this case. - static constexpr auto kPushPeriod = std::chrono::milliseconds{7000}; - - static StatusOr > Create(std::string_view source_name, - const std::filesystem::path file_name); - - FileSourceConnector() = delete; - ~FileSourceConnector() override = default; - - protected: - explicit FileSourceConnector(std::string_view source_name, const std::filesystem::path& file_name, - std::ifstream file, - std::unique_ptr table_schema); - Status InitImpl() override; - Status StopImpl() override; - void TransferDataImpl(ConnectorContext* ctx) override; - - private: - void TransferDataFromUnstructuredFile(DataTable::DynamicRecordBuilder* builder, uint64_t nanos, - const std::string& line); - void TransferDataFromJSON(DataTable::DynamicRecordBuilder* builder, uint64_t nanos, - const std::string& line); - void TransferDataFromCSV(DataTable::DynamicRecordBuilder* builder, uint64_t nanos, - const std::string& line); - - struct FileTransferSpec { - std::function - transfer_fn; - }; - std::string name_; - const std::filesystem::path file_name_; - std::ifstream file_; - std::unique_ptr table_schema_; - absl::flat_hash_map transfer_specs_; - int eof_count_ = 0; - pos_type last_pos_ = 0; - StirlingMonitor& monitor_ = *StirlingMonitor::GetInstance(); -}; - -StatusOr DataElementsFromJSON(std::ifstream& f_stream); -StatusOr DataElementsFromCSV(std::ifstream& f_stream); -StatusOr DataElementsForUnstructuredFile(); - -} // namespace stirling -} // namespace px diff --git a/src/stirling/source_connectors/file_source/file_source_connector_test.cc b/src/stirling/source_connectors/file_source/file_source_connector_test.cc deleted file mode 100644 index 4b5dba3c6b2..00000000000 --- a/src/stirling/source_connectors/file_source/file_source_connector_test.cc +++ /dev/null @@ -1,82 +0,0 
@@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include -#include - -#include "src/common/testing/testing.h" -#include "src/stirling/source_connectors/file_source/file_source_connector.h" - -namespace px { -namespace stirling { - -TEST(FileSourceConnectorTest, DataElementsFromJSON) { - const auto file_path = - testing::BazelRunfilePath("src/stirling/source_connectors/file_source/testdata/test.json"); - auto stream = std::ifstream(file_path); - auto result = DataElementsFromJSON(stream); - ASSERT_OK(result); - BackedDataElements elements = std::move(result.ValueOrDie()); - - ASSERT_EQ(elements.elements().size(), 8); - EXPECT_EQ(elements.elements()[0].name(), "time_"); - EXPECT_EQ(elements.elements()[0].type(), types::DataType::TIME64NS); - EXPECT_EQ(elements.elements()[1].name(), "uuid"); - EXPECT_EQ(elements.elements()[1].type(), types::DataType::UINT128); - EXPECT_EQ(elements.elements()[2].name(), "id"); - EXPECT_EQ(elements.elements()[2].type(), types::DataType::INT64); - EXPECT_EQ(elements.elements()[3].name(), "active"); - EXPECT_EQ(elements.elements()[3].type(), types::DataType::BOOLEAN); - EXPECT_EQ(elements.elements()[4].name(), "score"); - EXPECT_EQ(elements.elements()[4].type(), types::DataType::FLOAT64); - EXPECT_EQ(elements.elements()[5].name(), "name"); - EXPECT_EQ(elements.elements()[5].type(), types::DataType::STRING); - 
EXPECT_EQ(elements.elements()[6].name(), "object"); - EXPECT_EQ(elements.elements()[6].type(), types::DataType::STRING); - EXPECT_EQ(elements.elements()[7].name(), "arr"); - EXPECT_EQ(elements.elements()[7].type(), types::DataType::STRING); -} - -TEST(FileSourceConnectorTest, DISABLED_DataElementsFromJSON_UnsupportedTypes) { - const auto file_path = testing::BazelRunfilePath( - "src/stirling/source_connectors/file_source/testdata/unsupported.json"); - auto stream = std::ifstream(file_path); - auto result = DataElementsFromJSON(stream); - ASSERT_EQ(result.ok(), false); - ASSERT_EQ(result.status().msg(), - "Unable to parse JSON key 'unsupported': unsupported type: Object"); -} - -TEST(FileSourceConnectorTest, DataElementsForUnstructuredFile) { - - const auto file_path = testing::BazelRunfilePath( - "src/stirling/source_connectors/file_source/testdata/kern.log"); - auto stream = std::ifstream(file_path); - auto result = DataElementsForUnstructuredFile(); - ASSERT_OK(result); - BackedDataElements elements = std::move(result.ValueOrDie()); - EXPECT_EQ(elements.elements()[0].name(), "time_"); - EXPECT_EQ(elements.elements()[0].type(), types::DataType::TIME64NS); - EXPECT_EQ(elements.elements()[1].name(), "uuid"); - EXPECT_EQ(elements.elements()[1].type(), types::DataType::UINT128); - EXPECT_EQ(elements.elements()[2].name(), "raw_line"); - EXPECT_EQ(elements.elements()[2].type(), types::DataType::STRING); -} - -} // namespace stirling -} // namespace px diff --git a/src/stirling/source_connectors/file_source/stirling_fs_test.cc b/src/stirling/source_connectors/file_source/stirling_fs_test.cc deleted file mode 100644 index 6ce799e7326..00000000000 --- a/src/stirling/source_connectors/file_source/stirling_fs_test.cc +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include -#include - -#include - -#include "src/common/base/base.h" -#include "src/common/testing/testing.h" -#include "src/stirling/core/source_registry.h" -#include "src/stirling/core/types.h" -#include "src/stirling/stirling.h" - -namespace px { -namespace stirling { - -using ::px::testing::BazelRunfilePath; -using ::testing::SizeIs; -using ::testing::StrEq; - -//----------------------------------------------------------------------------- -// Test fixture and shared code -//----------------------------------------------------------------------------- - -class StirlingFileSourceTest : public ::testing::Test { - protected: - void SetUp() override { - std::unique_ptr registry = std::make_unique(); - stirling_ = Stirling::Create(std::move(registry)); - - // Set function to call on data pushes. 
- stirling_->RegisterDataPushCallback( - absl::bind_front(&StirlingFileSourceTest::AppendData, this)); - } - - Status AppendData(uint64_t /*table_id*/, types::TabletID /*tablet_id*/, - std::unique_ptr record_batch) { - record_batches_.push_back(std::move(record_batch)); - return Status::OK(); - } - - StatusOr WaitForStatus(sole::uuid trace_id) { - StatusOr s; - do { - s = stirling_->GetFileSourceInfo(trace_id); - std::this_thread::sleep_for(std::chrono::seconds(1)); - } while (!s.ok() && s.code() == px::statuspb::Code::RESOURCE_UNAVAILABLE); - - return s; - } - - std::optional FindFieldIndex(const stirlingpb::TableSchema& schema, - std::string_view field_name) { - int idx = 0; - for (const auto& e : schema.elements()) { - if (e.name() == field_name) { - return idx; - } - ++idx; - } - return {}; - } - - void DeployFileSource(std::string file_name, bool trigger_stop = true) { - sole::uuid id = sole::uuid4(); - stirling_->RegisterFileSource(id, file_name); - - // Should deploy. - stirlingpb::Publish publication; - ASSERT_OK_AND_ASSIGN(publication, WaitForStatus(id)); - - // Check the incremental publication change. - ASSERT_EQ(publication.published_info_classes_size(), 1); - info_class_ = publication.published_info_classes(0); - - // Run Stirling data collector. - ASSERT_OK(stirling_->RunAsThread()); - - // Wait to capture some data. - while (record_batches_.empty()) { - std::this_thread::sleep_for(std::chrono::seconds(1)); - } - - if (trigger_stop) { - ASSERT_OK(stirling_->RemoveFileSource(id)); - - // Should get removed. 
- EXPECT_EQ(WaitForStatus(id).code(), px::statuspb::Code::NOT_FOUND); - - stirling_->Stop(); - } - } - - std::unique_ptr stirling_; - std::vector> record_batches_; - stirlingpb::InfoClass info_class_; -}; - -class FileSourceJSONTest : public StirlingFileSourceTest { - protected: - const std::string kFilePath = - BazelRunfilePath("src/stirling/source_connectors/file_source/testdata/test.json"); -}; - -TEST_F(FileSourceJSONTest, ParsesJSONFile) { - DeployFileSource(kFilePath); - EXPECT_THAT(record_batches_, SizeIs(1)); - auto& rb = record_batches_[0]; - // Expect there to be 8 columns. time_ and the 4 cols from the JSON file. - EXPECT_EQ(rb->size(), 8); - - for (size_t i = 0; i < rb->size(); ++i) { - auto col_wrapper = rb->at(i); - // The JSON file has 10 lines. - EXPECT_EQ(col_wrapper->Size(), 10); - } -} - -TEST_F(FileSourceJSONTest, ContinuesReadingAfterEOFReached) { - std::string file_name = "./test.json"; - std::ofstream ofs(file_name, std::ios::app); - if (!ofs) { - LOG(FATAL) << absl::Substitute("Failed to open file= $0 received error=$1", kFilePath, strerror(errno)); - } - // FileSourceConnector parses the first line to infer the file's schema, an empty file will cause an error. - ofs << R"({"id": 0, "active": false, "score": 6.28, "name": "item0", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; - - DeployFileSource(file_name, false); - EXPECT_THAT(record_batches_, SizeIs(1)); - auto& rb = record_batches_[0]; - // Expect there to be 8 columns. time_ and the 4 cols from the JSON file. 
- EXPECT_EQ(rb->size(), 8); - - for (size_t i = 0; i < rb->size(); ++i) { - auto col_wrapper = rb->at(i); - // TODO(ddelnano): Clean up these log messages and add better assertions for uint128 case - if (i == 1) { - LOG(INFO) << col_wrapper->Get(0).val; - LOG(INFO) << col_wrapper->Get(1).val; - } else if (i == 6) { - LOG(INFO) << col_wrapper->Get(0); - EXPECT_EQ(col_wrapper->Get(0), R"({"a":1,"b":2})"); - } else if (i == 7) { - LOG(INFO) << col_wrapper->Get(0); - EXPECT_EQ(col_wrapper->Get(0), R"([0,1,2])"); - } - // The file's first row batch has 1 line - EXPECT_EQ(col_wrapper->Size(), 1); - } - - ofs << R"({"id": 1, "active": false, "score": 6.28, "name": "item1", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; - ofs.flush(); - ofs.close(); - - while (record_batches_.size() < 2) { - std::this_thread::sleep_for(std::chrono::seconds(3)); - LOG(INFO) << "Waiting for more data..."; - } - - auto& rb2 = record_batches_[1]; - for (size_t i = 0; i < rb2->size(); ++i) { - auto col_wrapper = rb2->at(i); - // The file's second row batch has 1 line - EXPECT_EQ(col_wrapper->Size(), 1); - } -} - -TEST_F(FileSourceJSONTest, ContinuesReadingAfterFileRotation) { - std::string file_name = "./test2.json"; - std::ofstream ofs(file_name, std::ios::app); - if (!ofs) { - LOG(FATAL) << absl::Substitute("Failed to open file= $0 received error=$1", kFilePath, strerror(errno)); - } - // FileSourceConnector parses the first line to infer the file's schema, an empty file will cause an error. - ofs << R"({"id": 0, "active": false, "score": 6.28, "name": "item0", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; - ofs << R"({"id": 1, "active": false, "score": 6.28, "name": "item1", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; - - DeployFileSource(file_name, false); - EXPECT_THAT(record_batches_, SizeIs(1)); - auto& rb = record_batches_[0]; - // Expect there to be 8 columns. time_ and the 4 cols from the JSON file. 
- EXPECT_EQ(rb->size(), 8); - - for (size_t i = 0; i < rb->size(); ++i) { - auto col_wrapper = rb->at(i); - // The file's first row batch has 2 lines - EXPECT_EQ(col_wrapper->Size(), 2); - } - - std::ofstream ofs2(file_name, std::ios::trunc); - ofs2 << R"({"id": 2, "active": false, "score": 6.28, "name": "item2", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]})" << std::endl; - ofs2.flush(); - ofs.close(); - - while (record_batches_.size() < 2) { - std::this_thread::sleep_for(std::chrono::seconds(3)); - LOG(INFO) << "Waiting for more data..."; - } - - auto& rb2 = record_batches_[1]; - for (size_t i = 0; i < rb2->size(); ++i) { - auto col_wrapper = rb2->at(i); - // The file's second row batch has 1 line - EXPECT_EQ(col_wrapper->Size(), 1); - } -} - -} // namespace stirling -} // namespace px diff --git a/src/stirling/source_connectors/file_source/testdata/kern.log b/src/stirling/source_connectors/file_source/testdata/kern.log deleted file mode 100644 index fed434d43a4..00000000000 --- a/src/stirling/source_connectors/file_source/testdata/kern.log +++ /dev/null @@ -1,5 +0,0 @@ -2025-03-05T22:30:12.313406+00:00 dev-vm kernel: ll header: 00000000: ff ff ff ff ff ff 42 01 0a 81 00 01 08 06 -2025-03-05T22:30:18.313309+00:00 dev-vm kernel: IPv4: martian source 10.129.0.8 from 10.129.0.1, on dev ens4 -2025-03-05T22:30:18.313333+00:00 dev-vm kernel: ll header: 00000000: ff ff ff ff ff ff 42 01 0a 81 00 01 08 06 -2025-03-05T22:30:24.313240+00:00 dev-vm kernel: IPv4: martian source 10.129.0.8 from 10.129.0.1, on dev ens4 -2025-03-05T22:30:24.313268+00:00 dev-vm kernel: ll header: 00000000: ff ff ff ff ff ff 42 01 0a 81 00 01 08 06 diff --git a/src/stirling/source_connectors/file_source/testdata/test.json b/src/stirling/source_connectors/file_source/testdata/test.json deleted file mode 100644 index f65c3fabafb..00000000000 --- a/src/stirling/source_connectors/file_source/testdata/test.json +++ /dev/null @@ -1,10 +0,0 @@ -{"id": 1, "active": true, "score": 3.14, "name": "item1", 
"object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 2, "active": false, "score": 2.71, "name": "item2", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 3, "active": true, "score": 1.41, "name": "item3", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 4, "active": false, "score": 1.73, "name": "item4", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 5, "active": true, "score": 0.99, "name": "item5", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 6, "active": false, "score": 2.18, "name": "item6", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 7, "active": true, "score": 3.67, "name": "item7", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 8, "active": false, "score": 4.56, "name": "item8", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 9, "active": true, "score": 5.32, "name": "item9", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} -{"id": 10, "active": false, "score": 6.28, "name": "item10", "object": {"a": 1, "b": 2}, "arr": [0, 1, 2]} diff --git a/src/stirling/source_connectors/file_source/testdata/unsupported.json b/src/stirling/source_connectors/file_source/testdata/unsupported.json deleted file mode 100644 index 455064ea679..00000000000 --- a/src/stirling/source_connectors/file_source/testdata/unsupported.json +++ /dev/null @@ -1 +0,0 @@ -{"id": 1, "active": true, "score": 3.14, "name": "item1", "unsupported": {"a": 1, "b": 2}} diff --git a/src/stirling/source_connectors/socket_tracer/BUILD.bazel b/src/stirling/source_connectors/socket_tracer/BUILD.bazel index 893de0485a5..90154e7edc4 100644 --- a/src/stirling/source_connectors/socket_tracer/BUILD.bazel +++ b/src/stirling/source_connectors/socket_tracer/BUILD.bazel @@ -464,7 +464,7 @@ pl_cc_bpf_test( "//src/stirling/source_connectors/socket_tracer/testing/container_images:node_12_3_1_container", "//src/stirling/source_connectors/socket_tracer/testing/container_images:node_14_18_1_alpine_container", 
"//src/stirling/source_connectors/socket_tracer/testing/container_images:node_client_container", - "//src/stirling/source_connectors/socket_tracer/testing/container_images:python_3_10_container", + "//src/stirling/source_connectors/socket_tracer/testing/container_images:python_min_310_container", "//src/stirling/source_connectors/socket_tracer/testing/container_images:ruby_container", "//src/stirling/testing:cc_library", ], diff --git a/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel b/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel index 8b0a3e3ee2b..2b6e32d678a 100644 --- a/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel +++ b/src/stirling/source_connectors/socket_tracer/testing/container_images/BUILD.bazel @@ -322,9 +322,9 @@ pl_cc_test_library( ) pl_cc_test_library( - name = "python_3_10_container", + name = "python_min_310_container", srcs = [], - hdrs = ["python_3_10_container.h"], + hdrs = ["python_min_310_container.h"], data = [ "//src/stirling/source_connectors/socket_tracer/testing/containers/ssl:python_min_310_https_server.tar", ], diff --git a/src/stirling/source_connectors/stirling_error/BUILD.bazel b/src/stirling/source_connectors/stirling_error/BUILD.bazel index c0c843d88ca..15f25dc41af 100644 --- a/src/stirling/source_connectors/stirling_error/BUILD.bazel +++ b/src/stirling/source_connectors/stirling_error/BUILD.bazel @@ -17,7 +17,7 @@ load("//bazel:pl_build_system.bzl", "pl_cc_bpf_test", "pl_cc_library") load("//src/stirling/source_connectors/perf_profiler/testing:testing.bzl", "agent_libs", "px_jattach", "stirling_profiler_java_args") -package(default_visibility = ["//src/stirling:__subpackages__", "//src/vizier/services/agent/shared/manager:__subpackages__"]) +package(default_visibility = ["//src/stirling:__subpackages__"]) pl_cc_library( name = "cc_library", @@ -42,7 +42,6 @@ pl_cc_bpf_test( args = stirling_profiler_java_args, data = agent_libs 
+ [ px_jattach, - "testdata/test.json", "//src/stirling/source_connectors/perf_profiler/testing/java:java_image_base-java-profiler-test-image-omit-frame-pointer.tar", ], flaky = True, diff --git a/src/stirling/source_connectors/stirling_error/sink_results_table.h b/src/stirling/source_connectors/stirling_error/sink_results_table.h deleted file mode 100644 index d2f15bbfa57..00000000000 --- a/src/stirling/source_connectors/stirling_error/sink_results_table.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include "src/common/base/base.h" -#include "src/stirling/core/canonical_types.h" -#include "src/stirling/core/output.h" -#include "src/stirling/core/source_connector.h" - -namespace px { -namespace stirling { - -// clang-format off -constexpr DataElement kSinkResultsElements[] = { - canonical_data_elements::kTime, - canonical_data_elements::kUPID, - {"bytes_transferred", "", - types::DataType::INT64, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, - {"destination", "The planpb::OperatorType enum of the sink", - types::DataType::INT64, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, - {"stream_id", "The ID of the stream of interest.", - types::DataType::STRING, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, -}; - -constexpr DataTableSchema kSinkResultsTable { - "sink_results", - "This table contains the sink node results during execution.", - kSinkResultsElements -}; - -// clang-format on -DEFINE_PRINT_TABLE(SinkResults); - -} // namespace stirling -} // namespace px diff --git a/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc b/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc index df3b567982b..7eb9f8a910c 100644 --- a/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc +++ b/src/stirling/source_connectors/stirling_error/stirling_error_bpf_test.cc @@ -106,23 +106,6 @@ std::vector ToProbeRecordVector( return result; } -std::vector ToStreamRecordVector( - const std::vector>& record_batches) { - std::vector result; - - for (size_t rb_idx = 0; rb_idx < record_batches.size(); ++rb_idx) { - auto& rb = *record_batches[rb_idx]; - for (size_t idx = 0; idx < rb.front()->Size(); ++idx) { - StreamStatusRecord r; - r.stream_id = rb[2]->Get(idx).string(); - r.bytes_sent = rb[3]->Get(idx).val; - r.info = rb[4]->Get(idx).string(); - result.push_back(r); - } - } - return result; -} - // A SourceConnector that 
fails on Init. class FaultyConnector : public SourceConnector { public: @@ -212,25 +195,6 @@ class StirlingErrorTest : public ::testing::Test { return trace_id; } - StatusOr DeployFileSource(const std::string& program_text) { - // Compile file source. - PX_ASSIGN_OR_RETURN(auto compiled_file_source, - px::carnot::planner::compiler::CompileFileSource(program_text)); - - // Register tracepoint. - sole::uuid id = sole::uuid4(); - stirling_->RegisterFileSource(id, std::move(compiled_file_source.glob_pattern())); - - // Wait for deployment to finish. - StatusOr s; - do { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - s = stirling_->GetFileSourceInfo(id); - } while (!s.ok() && s.code() == px::statuspb::Code::RESOURCE_UNAVAILABLE); - - return id; - } - Status AppendData(uint64_t table_id, types::TabletID tablet_id, std::unique_ptr record_batch) { PX_UNUSED(tablet_id); @@ -244,8 +208,6 @@ class StirlingErrorTest : public ::testing::Test { source_status_batches_.push_back(std::move(record_batch)); } else if (table_name == "probe_status") { probe_status_batches_.push_back(std::move(record_batch)); - } else if (table_name == "stream_status") { - stream_status_batches_.push_back(std::move(record_batch)); } } return Status::OK(); @@ -259,9 +221,6 @@ class StirlingErrorTest : public ::testing::Test { } else if constexpr (std::is_same_v) { return WaitAndExpectRecords([&]() { return ToProbeRecordVector(probe_status_batches_); }, expected); - } else if constexpr (std::is_same_v) { - return WaitAndExpectRecords([&]() { return ToStreamRecordVector(stream_status_batches_); }, - expected); } else { static_assert(always_false); } @@ -271,7 +230,6 @@ class StirlingErrorTest : public ::testing::Test { std::unique_ptr stirling_; std::vector> source_status_batches_; std::vector> probe_status_batches_; - std::vector> stream_status_batches_; }; TEST_F(StirlingErrorTest, SourceConnectorInitOK) { @@ -569,55 +527,5 @@ TEST_F(StirlingErrorTest, 
PerfProfilerNoPreserveFramePointer) { EXPECT_THAT(probe_records, IsEmpty()); } -// Deploy a FileSource stream and record the progress of the stream throughput. -// Expects one message for each TransferDataImpl call to the FileSource. -TEST_F(StirlingErrorTest, StreamStatusThroughput) { - // Register StirlingErrorConnector. - std::unique_ptr registry = std::make_unique(); - registry->RegisterOrDie("stirling_error"); - - // Run Stirling. - InitStirling(std::move(registry)); - ASSERT_OK(stirling_->RunAsThread()); - ASSERT_OK(stirling_->WaitUntilRunning(std::chrono::seconds(5))); - - auto file_stream_pxl = R"( -import pxlog -glob_pattern = '$0' -table_name = '$1' -pxlog.FileSource(glob_pattern, table_name, "1m") -)"; - - const auto glob_pattern = - BazelRunfilePath("src/stirling/source_connectors/stirling_error/testdata/test.json").string(); - const auto table_name = "test.json"; - - ASSERT_OK_AND_ASSIGN( - auto id, DeployFileSource(absl::Substitute(file_stream_pxl, glob_pattern, table_name))); - - // Stirling Error Source Connector Initialization. - WaitAndExpectStatusRecords(std::vector{ - {.source_connector = "stirling_error", - .status = px::statuspb::Code::OK, - .error = "", - .context = "Init"}, - }); - // Tracepoint deployed. - WaitAndExpectStatusRecords( - std::vector{{.stream_id = glob_pattern, .bytes_sent = 587, .info = ""}}); - - // Remove file source; - ASSERT_OK(stirling_->RemoveFileSource(id)); - StatusOr s; - do { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - s = stirling_->GetFileSourceInfo(id); - } while (s.ok()); - - // TODO(ddelnano): Add file source removal message assertion. 
- - stirling_->Stop(); -} - } // namespace stirling } // namespace px diff --git a/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc b/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc index 1be86eb2794..5489dac77f7 100644 --- a/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc +++ b/src/stirling/source_connectors/stirling_error/stirling_error_connector.cc @@ -16,7 +16,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -#include +#include #include "src/common/base/base.h" #include "src/common/system/proc_parser.h" @@ -39,7 +39,7 @@ Status StirlingErrorConnector::InitImpl() { Status StirlingErrorConnector::StopImpl() { return Status::OK(); } void StirlingErrorConnector::TransferDataImpl(ConnectorContext* ctx) { - DCHECK_EQ(data_tables_.size(), 4U) << "StirlingErrorConnector has four data tables."; + DCHECK_EQ(data_tables_.size(), 2U) << "StirlingErrorConnector has two data tables."; if (data_tables_[kStirlingErrorTableNum] != nullptr) { TransferStirlingErrorTable(ctx, data_tables_[kStirlingErrorTableNum]); @@ -48,10 +48,6 @@ void StirlingErrorConnector::TransferDataImpl(ConnectorContext* ctx) { if (data_tables_[kProbeStatusTableNum] != nullptr) { TransferProbeStatusTable(ctx, data_tables_[kProbeStatusTableNum]); } - - if (data_tables_[kStreamStatusTableNum] != nullptr) { - TransferStreamStatusTable(ctx, data_tables_[kStreamStatusTableNum]); - } } void StirlingErrorConnector::TransferStirlingErrorTable(ConnectorContext* ctx, @@ -83,18 +79,5 @@ void StirlingErrorConnector::TransferProbeStatusTable(ConnectorContext* ctx, } } -void StirlingErrorConnector::TransferStreamStatusTable(ConnectorContext* ctx, - DataTable* data_table) { - md::UPID upid = md::UPID(ctx->GetASID(), pid_, start_time_); - for (auto& record : monitor_.ConsumeStreamStatusRecords()) { - DataTable::RecordBuilder<&kStreamStatusTable> r(data_table, record.timestamp_ns); - r.Append(static_cast(record.timestamp_ns)); - 
r.Append(upid.value()); - r.Append(std::move(record.stream_id)); - r.Append(static_cast(record.bytes_sent)); - r.Append(std::move(record.info)); - } -} - } // namespace stirling } // namespace px diff --git a/src/stirling/source_connectors/stirling_error/stirling_error_connector.h b/src/stirling/source_connectors/stirling_error/stirling_error_connector.h index 21db2a7c7f6..0dae755c947 100644 --- a/src/stirling/source_connectors/stirling_error/stirling_error_connector.h +++ b/src/stirling/source_connectors/stirling_error/stirling_error_connector.h @@ -26,9 +26,7 @@ #include "src/common/base/base.h" #include "src/stirling/core/source_connector.h" #include "src/stirling/source_connectors/stirling_error/probe_status_table.h" -#include "src/stirling/source_connectors/stirling_error/sink_results_table.h" #include "src/stirling/source_connectors/stirling_error/stirling_error_table.h" -#include "src/stirling/source_connectors/stirling_error/stream_status_table.h" #include "src/stirling/utils/monitor.h" namespace px { @@ -39,11 +37,9 @@ class StirlingErrorConnector : public SourceConnector { static constexpr std::string_view kName = "stirling_error"; static constexpr auto kSamplingPeriod = std::chrono::milliseconds{1000}; static constexpr auto kPushPeriod = std::chrono::milliseconds{1000}; - static constexpr auto kTables = - MakeArray(kStirlingErrorTable, kProbeStatusTable, kStreamStatusTable, kSinkResultsTable); + static constexpr auto kTables = MakeArray(kStirlingErrorTable, kProbeStatusTable); static constexpr uint32_t kStirlingErrorTableNum = TableNum(kTables, kStirlingErrorTable); static constexpr uint32_t kProbeStatusTableNum = TableNum(kTables, kProbeStatusTable); - static constexpr uint32_t kStreamStatusTableNum = TableNum(kTables, kStreamStatusTable); StirlingErrorConnector() = delete; ~StirlingErrorConnector() override = default; @@ -63,7 +59,6 @@ class StirlingErrorConnector : public SourceConnector { void TransferStirlingErrorTable(ConnectorContext* ctx, 
DataTable* data_table); void TransferProbeStatusTable(ConnectorContext* ctx, DataTable* data_table); - void TransferStreamStatusTable(ConnectorContext* ctx, DataTable* data_table); StirlingMonitor& monitor_ = *StirlingMonitor::GetInstance(); int32_t pid_ = -1; diff --git a/src/stirling/source_connectors/stirling_error/stream_status_table.h b/src/stirling/source_connectors/stirling_error/stream_status_table.h deleted file mode 100644 index 160694cbcad..00000000000 --- a/src/stirling/source_connectors/stirling_error/stream_status_table.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include "src/common/base/base.h" -#include "src/stirling/core/canonical_types.h" -#include "src/stirling/core/output.h" -#include "src/stirling/core/source_connector.h" - -namespace px { -namespace stirling { - -// clang-format off -constexpr DataElement kStreamStatusElements[] = { - canonical_data_elements::kTime, - canonical_data_elements::kUPID, - {"stream_id", "The ID of the stream of interest. 
For file source connector this is glob_pattern", - types::DataType::STRING, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, - {"bytes_sent", "The error messages of the deployment or event, if any", - types::DataType::INT64, types::SemanticType::ST_BYTES, types::PatternType::METRIC_COUNTER}, - {"info", "Optional extra info provided as a JSON", - types::DataType::STRING, types::SemanticType::ST_NONE, types::PatternType::GENERAL}, -}; - -constexpr DataTableSchema kStreamStatusTable { - "stream_status", - "This table contains the status of streams Stirling is ingested across various source connectors", - kStreamStatusElements -}; - -// clang-format on -DEFINE_PRINT_TABLE(StreamStatus); - -} // namespace stirling -} // namespace px diff --git a/src/stirling/source_connectors/stirling_error/testdata/test.json b/src/stirling/source_connectors/stirling_error/testdata/test.json deleted file mode 100644 index 96b30cbd35c..00000000000 --- a/src/stirling/source_connectors/stirling_error/testdata/test.json +++ /dev/null @@ -1,10 +0,0 @@ -{"id": 1, "active": true, "score": 3.14, "name": "item1"} -{"id": 2, "active": false, "score": 2.71, "name": "item2"} -{"id": 3, "active": true, "score": 1.41, "name": "item3"} -{"id": 4, "active": false, "score": 1.73, "name": "item4"} -{"id": 5, "active": true, "score": 0.99, "name": "item5"} -{"id": 6, "active": false, "score": 2.18, "name": "item6"} -{"id": 7, "active": true, "score": 3.67, "name": "item7"} -{"id": 8, "active": false, "score": 4.56, "name": "item8"} -{"id": 9, "active": true, "score": 5.32, "name": "item9"} -{"id": 10, "active": false, "score": 6.28, "name": "item10"} diff --git a/src/stirling/source_connectors/stirling_error/testdata/unsupported.json b/src/stirling/source_connectors/stirling_error/testdata/unsupported.json deleted file mode 100644 index 455064ea679..00000000000 --- a/src/stirling/source_connectors/stirling_error/testdata/unsupported.json +++ /dev/null @@ -1 +0,0 @@ -{"id": 1, "active": true, 
"score": 3.14, "name": "item1", "unsupported": {"a": 1, "b": 2}} diff --git a/src/stirling/stirling.cc b/src/stirling/stirling.cc index fd35286854e..5b15a5ecabd 100644 --- a/src/stirling/stirling.cc +++ b/src/stirling/stirling.cc @@ -46,7 +46,6 @@ #include "src/stirling/source_connectors/dynamic_bpftrace/dynamic_bpftrace_connector.h" #include "src/stirling/source_connectors/dynamic_bpftrace/utils.h" #include "src/stirling/source_connectors/dynamic_tracer/dynamic_trace_connector.h" -#include "src/stirling/source_connectors/file_source/file_source_connector.h" #include "src/stirling/source_connectors/jvm_stats/jvm_stats_connector.h" #include "src/stirling/source_connectors/network_stats/network_stats_connector.h" #include "src/stirling/source_connectors/perf_profiler/perf_profile_connector.h" @@ -201,11 +200,8 @@ class StirlingImpl final : public Stirling { void RegisterTracepoint( sole::uuid uuid, std::unique_ptr program) override; - void RegisterFileSource(sole::uuid id, std::string file_name) override; StatusOr GetTracepointInfo(sole::uuid trace_id) override; - StatusOr GetFileSourceInfo(sole::uuid trace_id) override; Status RemoveTracepoint(sole::uuid trace_id) override; - Status RemoveFileSource(sole::uuid trace_id) override; void GetPublishProto(stirlingpb::Publish* publish_pb) override; void RegisterDataPushCallback(DataPushCallback f) override { data_push_callback_ = f; } void RegisterAgentMetadataCallback(AgentMetadataCallback f) override { @@ -228,12 +224,9 @@ class StirlingImpl final : public Stirling { void UpdateDynamicTraceStatus(const sole::uuid& uuid, const StatusOr& status); - void UpdateFileSourceStatus(const sole::uuid& uuid, const StatusOr& status); - private: // Adds a source to Stirling, and updates all state accordingly. - Status AddSource(std::unique_ptr source, - std::optional mutation_id = {}); + Status AddSource(std::unique_ptr source); // Removes a source and all its info classes from stirling. 
Status RemoveSource(std::string_view source_name); @@ -246,11 +239,6 @@ class StirlingImpl final : public Stirling { // Destroys a dynamic tracing source created by DeployDynamicTraceConnector. void DestroyDynamicTraceConnector(sole::uuid trace_id); - // Creates and deploys file source connector - void DeployFileSourceConnector(sole::uuid trace_id, std::string file_name); - - void DestroyFileSourceConnector(sole::uuid id); - // Main run implementation. void RunCore(); @@ -289,10 +277,6 @@ class StirlingImpl final : public Stirling { absl::flat_hash_map> dynamic_trace_status_map_ ABSL_GUARDED_BY(dynamic_trace_status_map_lock_); - absl::base_internal::SpinLock file_source_status_map_lock_; - absl::flat_hash_map> file_source_status_map_ - ABSL_GUARDED_BY(file_source_status_map_lock_); - StirlingMonitor& monitor_ = *StirlingMonitor::GetInstance(); struct DynamicTraceInfo { @@ -304,15 +288,6 @@ class StirlingImpl final : public Stirling { absl::flat_hash_map trace_id_info_map_ ABSL_GUARDED_BY(dynamic_trace_status_map_lock_); - struct FileSourceInfo { - std::string source_connector; - std::string file_name; - std::string output_table; - }; - - absl::flat_hash_map file_source_info_map_ - ABSL_GUARDED_BY(file_source_status_map_lock_); - // RunCoreStats tracks how much work is accomplished in each run core iteration, // and it also keeps a histogram of sleep durations. 
RunCoreStats run_core_stats_; @@ -452,8 +427,7 @@ std::unique_ptr StirlingImpl::GetContext() { return std::unique_ptr(new SystemWideStandaloneContext()); } -Status StirlingImpl::AddSource(std::unique_ptr source, - std::optional mutation_id) { +Status StirlingImpl::AddSource(std::unique_ptr source) { PX_RETURN_IF_ERROR(source->Init()); absl::base_internal::SpinLockHolder lock(&info_class_mgrs_lock_); @@ -464,9 +438,6 @@ Status StirlingImpl::AddSource(std::unique_ptr source, LOG(INFO) << absl::Substitute("Adding info class: [$0/$1]", source->name(), schema.name()); auto mgr = std::make_unique(schema); mgr->SetSourceConnector(source.get()); - if (mutation_id.has_value()) { - mgr->SetMutationId(mutation_id.value()); - } data_tables.push_back(mgr->data_table()); info_class_mgrs_.push_back(std::move(mgr)); } @@ -528,13 +499,6 @@ Status StirlingImpl::RemoveSource(std::string_view source_name) { namespace { constexpr char kDynTraceSourcePrefix[] = "DT_"; -constexpr char kFileSourcePrefix[] = "LOG_"; - -StatusOr> CreateFileSourceConnector(sole::uuid id, - std::string file_name) { - auto name = absl::StrCat(kFileSourcePrefix, id.str()); - return FileSourceConnector::Create(name, file_name); -} StatusOr> CreateDynamicSourceConnector( sole::uuid trace_id, @@ -571,82 +535,6 @@ StatusOr> CreateDynamicSourceConnector( } // namespace -void StirlingImpl::UpdateFileSourceStatus(const sole::uuid& id, - const StatusOr& s) { - absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); - file_source_status_map_[id] = s; - - // Find program name and log dynamic trace status update to Stirling Monitor. - auto it = file_source_info_map_.find(id); - if (it != file_source_info_map_.end()) { - FileSourceInfo& file_source_info = it->second; - - // Build info JSON with trace_id and output_table. 
- ::px::utils::JSONObjectBuilder builder; - builder.WriteKV("trace_id", id.str()); - if (s.ok()) { - builder.WriteKV("output_table", file_source_info.output_table); - } - - monitor_.AppendSourceStatusRecord(file_source_info.source_connector, s.status(), - builder.GetString()); - - // Clean up map if status is not ok. When status is RESOURCE_UNAVAILABLE, either deployment - // or removal is pending, so don't clean up. - if (!s.ok() && s.code() != statuspb::Code::RESOURCE_UNAVAILABLE) { - file_source_info_map_.erase(id); - } - } -} - -void StirlingImpl::DeployFileSourceConnector(sole::uuid id, std::string file_name) { - auto timer = ElapsedTimer(); - timer.Start(); - - // Try creating the DynamicTraceConnector--which compiles BCC code. - // On failure, set status and exit. - auto source_or_s = CreateFileSourceConnector(id, file_name); - if (!source_or_s.ok()) { - Status ret_status(px::statuspb::Code::INTERNAL, source_or_s.msg()); - UpdateFileSourceStatus(id, ret_status); - LOG(INFO) << ret_status.ToString(); - return; - } - auto source = source_or_s.ConsumeValueOrDie(); - - LOG(INFO) << absl::Substitute("FileSourceConnector [$0] created in $1 ms.", source->name(), - timer.ElapsedTime_us() / 1000.0); - - // Cache table schema name as source will be moved below. 
- std::string output_name(source->table_schemas()[0].name()); - - { - absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); - auto it = file_source_info_map_.find(id); - if (it != file_source_info_map_.end()) { - file_source_info_map_[id].output_table = output_name; - } - } - - timer.Start(); - auto s = AddSource(std::move(source), id.str()); - if (!s.ok()) { - UpdateFileSourceStatus(id, s); - LOG(INFO) << s.ToString(); - return; - } - LOG(INFO) << absl::Substitute("FileSourceConnector [$0] created in $1 ms.", id.str(), - timer.ElapsedTime_us() / 1000.0); - - stirlingpb::Publish publication; - { - absl::base_internal::SpinLockHolder lock(&info_class_mgrs_lock_); - PopulatePublishProto(&publication, info_class_mgrs_, output_name); - } - - UpdateFileSourceStatus(id, publication); -} - void StirlingImpl::DeployDynamicTraceConnector( sole::uuid trace_id, std::unique_ptr program) { @@ -675,7 +563,7 @@ void StirlingImpl::DeployDynamicTraceConnector( timer.Start(); // Next, try adding the source (this actually tries to deploy BPF code). // On failure, set status and exit, but do this outside the lock for efficiency reasons. - RETURN_IF_ERROR(AddSource(std::move(source), trace_id.str())); + RETURN_IF_ERROR(AddSource(std::move(source))); LOG(INFO) << absl::Substitute("DynamicTrace [$0]: Deployed BPF program in $1 ms.", trace_id.str(), timer.ElapsedTime_us() / 1000.0); @@ -706,29 +594,6 @@ void StirlingImpl::DestroyDynamicTraceConnector(sole::uuid trace_id) { } } -void StirlingImpl::DestroyFileSourceConnector(sole::uuid trace_id) { - auto timer = ElapsedTimer(); - timer.Start(); - - // Remove from stirling. - auto s = RemoveSource(kFileSourcePrefix + trace_id.str()); - if (!s.ok()) { - UpdateFileSourceStatus(trace_id, s); - LOG(INFO) << s.ToString(); - return; - } - - LOG(INFO) << absl::Substitute("FileSource [$0]: Removed file polling $1 ms.", trace_id.str(), - timer.ElapsedTime_us() / 1000.0); - - // Remove from map. 
- { - absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); - file_source_status_map_.erase(trace_id); - file_source_info_map_.erase(trace_id); - } -} - #undef RETURN_ERROR #undef RETURN_IF_ERROR #undef ASSIGN_OR_RETURN @@ -787,29 +652,6 @@ void StirlingImpl::RegisterTracepoint( t.detach(); } -void StirlingImpl::RegisterFileSource(sole::uuid id, std::string file_name) { - // Temporary: Check if the target exists on this PEM, otherwise return NotFound. - // TODO(oazizi): Need to think of a better way of doing this. - // Need to differentiate errors caused by the binary not being on the host vs - // other errors. Also should consider races with binary creation/deletion. - { - absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); - std::string source_connector = "file_source"; - file_source_info_map_[id] = {.source_connector = std::move(source_connector), - .file_name = file_name, - .output_table = ""}; - } - - // Initialize the status of this trace to pending. 
- { - absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); - file_source_status_map_[id] = error::ResourceUnavailable("Waiting for file polling to start."); - } - - auto t = std::thread(&StirlingImpl::DeployFileSourceConnector, this, id, file_name); - t.detach(); -} - StatusOr StirlingImpl::GetTracepointInfo(sole::uuid trace_id) { absl::base_internal::SpinLockHolder lock(&dynamic_trace_status_map_lock_); @@ -822,18 +664,6 @@ StatusOr StirlingImpl::GetTracepointInfo(sole::uuid trace_i return s; } -StatusOr StirlingImpl::GetFileSourceInfo(sole::uuid trace_id) { - absl::base_internal::SpinLockHolder lock(&file_source_status_map_lock_); - - auto iter = file_source_status_map_.find(trace_id); - if (iter == file_source_status_map_.end()) { - return error::NotFound("FileSource $0 not found.", trace_id.str()); - } - - StatusOr s = iter->second; - return s; -} - Status StirlingImpl::RemoveTracepoint(sole::uuid trace_id) { // Change the status of this trace to pending while we delete it. UpdateDynamicTraceStatus(trace_id, error::ResourceUnavailable("Probe removal in progress.")); @@ -844,16 +674,6 @@ Status StirlingImpl::RemoveTracepoint(sole::uuid trace_id) { return Status::OK(); } -Status StirlingImpl::RemoveFileSource(sole::uuid trace_id) { - // Change the status of this trace to pending while we delete it. 
- UpdateFileSourceStatus(trace_id, error::ResourceUnavailable("file source removal in progress.")); - - auto t = std::thread(&StirlingImpl::DestroyFileSourceConnector, this, trace_id); - t.detach(); - - return Status::OK(); -} - void StirlingImpl::GetPublishProto(stirlingpb::Publish* publish_pb) { absl::base_internal::SpinLockHolder lock(&info_class_mgrs_lock_); PopulatePublishProto(publish_pb, info_class_mgrs_); diff --git a/src/stirling/stirling.h b/src/stirling/stirling.h index 86231e05193..16a1d65c6e0 100644 --- a/src/stirling/stirling.h +++ b/src/stirling/stirling.h @@ -122,10 +122,6 @@ class Stirling : public NotCopyable { * Returns the status of the probe registration for the trace identified by the input ID. */ virtual StatusOr GetTracepointInfo(sole::uuid trace_id) = 0; - virtual StatusOr GetFileSourceInfo(sole::uuid trace_id) = 0; - - virtual void RegisterFileSource(sole::uuid id, std::string file_name) = 0; - virtual Status RemoveFileSource(sole::uuid id) = 0; /** * Remove a dynamically created tracepoint. 
diff --git a/src/stirling/testing/common.h b/src/stirling/testing/common.h index ef8fda4a796..c754380eb34 100644 --- a/src/stirling/testing/common.h +++ b/src/stirling/testing/common.h @@ -176,7 +176,7 @@ inline types::ColumnWrapperRecordBatch ExtractRecordsMatchingPID(DataTable* data class Timeout { public: - explicit Timeout(std::chrono::nanoseconds timeout = std::chrono::minutes{1}) + explicit Timeout(std::chrono::nanoseconds timeout = std::chrono::minutes{5}) : timeout_(timeout), start_(std::chrono::steady_clock::now()) {} bool TimedOut() { return !((std::chrono::steady_clock::now() - start_) < timeout_); } diff --git a/src/stirling/testing/overloads.h b/src/stirling/testing/overloads.h index 8a4a8b008f1..e38c540d000 100644 --- a/src/stirling/testing/overloads.h +++ b/src/stirling/testing/overloads.h @@ -19,7 +19,7 @@ #pragma once #include -#include +#include #include "src/stirling/utils/monitor.h" #include "src/stirling/utils/tcp_stats.h" @@ -53,16 +53,6 @@ inline void PrintTo(const ProbeStatusRecord& r, std::ostream* os) { r.info); } -inline bool operator==(const StreamStatusRecord& a, const StreamStatusRecord& b) { - return (a.stream_id == b.stream_id) && (a.bytes_sent == b.bytes_sent) && (a.info == b.info); -} - -inline void PrintTo(const StreamStatusRecord& r, std::ostream* os) { - *os << absl::Substitute( - "StreamStatusRecord{timestamp_ns: $0, stream_id: $1, bytes_sent: $2, info: $3}", - r.timestamp_ns, r.stream_id, r.bytes_sent, r.info); -} - inline bool operator==(const TcpStatsRecord& a, const TcpStatsRecord& b) { return (a.remote_port == b.remote_port) && (a.remote_addr == b.remote_addr) && (a.tx == b.tx) && (a.rx == b.rx) && (a.retransmits == b.retransmits); diff --git a/src/stirling/testing/stirling_mock.h b/src/stirling/testing/stirling_mock.h index fad1c29e550..9a997af8a90 100644 --- a/src/stirling/testing/stirling_mock.h +++ b/src/stirling/testing/stirling_mock.h @@ -18,8 +18,6 @@ #pragma once -#include - #include #include #include @@ -42,9 
+40,6 @@ class MockStirling : public Stirling { (override)); MOCK_METHOD(StatusOr, GetTracepointInfo, (sole::uuid trace_id), (override)); MOCK_METHOD(Status, RemoveTracepoint, (sole::uuid trace_id), (override)); - MOCK_METHOD(void, RegisterFileSource, (sole::uuid trace_id, std::string file_name), (override)); - MOCK_METHOD(StatusOr, GetFileSourceInfo, (sole::uuid trace_id), (override)); - MOCK_METHOD(Status, RemoveFileSource, (sole::uuid trace_id), (override)); MOCK_METHOD(void, GetPublishProto, (stirlingpb::Publish * publish_pb), (override)); MOCK_METHOD(void, RegisterDataPushCallback, (DataPushCallback f), (override)); MOCK_METHOD(void, RegisterAgentMetadataCallback, (AgentMetadataCallback f), (override)); diff --git a/src/stirling/utils/monitor.cc b/src/stirling/utils/monitor.cc index 2341f3ee018..673e92da35d 100644 --- a/src/stirling/utils/monitor.cc +++ b/src/stirling/utils/monitor.cc @@ -74,12 +74,6 @@ void StirlingMonitor::AppendProbeStatusRecord(const std::string& source_connecto {CurrentTimeNS(), source_connector, tracepoint, status.code(), status.msg(), info}); } -void StirlingMonitor::AppendStreamStatusRecord(const std::string& stream_id, - const int64_t bytes_sent, const std::string& info) { - absl::base_internal::SpinLockHolder lock(&stream_status_lock_); - stream_status_records_.push_back({CurrentTimeNS(), stream_id, bytes_sent, info}); -} - std::vector StirlingMonitor::ConsumeSourceStatusRecords() { absl::base_internal::SpinLockHolder lock(&source_status_lock_); return std::move(source_status_records_); @@ -90,10 +84,5 @@ std::vector StirlingMonitor::ConsumeProbeStatusRecords() { return std::move(probe_status_records_); } -std::vector StirlingMonitor::ConsumeStreamStatusRecords() { - absl::base_internal::SpinLockHolder lock(&stream_status_lock_); - return std::move(stream_status_records_); -} - } // namespace stirling } // namespace px diff --git a/src/stirling/utils/monitor.h b/src/stirling/utils/monitor.h index 596dfc7fed9..214a2f49e39 100644 --- 
a/src/stirling/utils/monitor.h +++ b/src/stirling/utils/monitor.h @@ -50,14 +50,6 @@ struct ProbeStatusRecord { std::string info = ""; }; -// Status of stream processing -struct StreamStatusRecord { - int64_t timestamp_ns = 0; - std::string stream_id = ""; - int64_t bytes_sent = 0; - std::string info = ""; -}; - class StirlingMonitor : NotCopyMoveable { public: static StirlingMonitor* GetInstance() { @@ -73,13 +65,10 @@ class StirlingMonitor : NotCopyMoveable { // Stirling Error Reporting. void AppendProbeStatusRecord(const std::string& source_connector, const std::string& tracepoint, const Status& status, const std::string& info); - void AppendStreamStatusRecord(const std::string& stream_id, const int64_t bytes_sent, - const std::string& info); void AppendSourceStatusRecord(const std::string& source_connector, const Status& status, const std::string& context); std::vector ConsumeProbeStatusRecords(); std::vector ConsumeSourceStatusRecords(); - std::vector ConsumeStreamStatusRecords(); static constexpr auto kCrashWindow = std::chrono::seconds{5}; @@ -92,13 +81,10 @@ class StirlingMonitor : NotCopyMoveable { std::vector probe_status_records_ ABSL_GUARDED_BY(probe_status_lock_); // Records of Stirling Source Connector status. std::vector source_status_records_ ABSL_GUARDED_BY(source_status_lock_); - // Records of Stirling stream connector status. - std::vector stream_status_records_ ABSL_GUARDED_BY(stream_status_lock_); // Lock to protect probe and source records. 
absl::base_internal::SpinLock probe_status_lock_; absl::base_internal::SpinLock source_status_lock_; - absl::base_internal::SpinLock stream_status_lock_; prometheus::Counter& java_proc_crashed_during_attach_; }; diff --git a/src/table_store/schema/relation.cc b/src/table_store/schema/relation.cc index d2ca4a35605..da087835e1b 100644 --- a/src/table_store/schema/relation.cc +++ b/src/table_store/schema/relation.cc @@ -38,11 +38,6 @@ Relation::Relation() = default; Relation::Relation(ColTypeArray col_types, ColNameArray col_names) : Relation(col_types, col_names, ColDescArray(col_types.size(), "")) {} -Relation::Relation(ColTypeArray col_types, ColNameArray col_names, std::optional mutation_id) - : Relation(col_types, col_names, ColDescArray(col_types.size(), "")) { - mutation_id_ = mutation_id; - } - Relation::Relation(ColTypeArray col_types, ColNameArray col_names, ColDescArray col_desc) : Relation(col_types, col_names, col_desc, ColSemanticTypeArray(col_types.size(), types::ST_NONE)) {} @@ -166,9 +161,6 @@ std::string Relation::DebugString() const { for (size_t i = 0; i < col_types_.size(); ++i) { col_info_as_str.push_back(absl::StrCat(col_names_[i], ":", types::ToString(col_types_[i]))); } - if (mutation_id_.has_value()) { - col_info_as_str.push_back(absl::Substitute("mutation_id:$0", mutation_id_.value())); - } return "[" + absl::StrJoin(col_info_as_str, ", ") + "]"; } @@ -181,9 +173,6 @@ Status Relation::ToProto(table_store::schemapb::Relation* relation_proto) const col_pb->set_column_name(GetColumnName(col_idx)); col_pb->set_column_semantic_type(GetColumnSemanticType(col_idx)); } - if (mutation_id_.has_value()) { - relation_proto->set_mutation_id(mutation_id_.value()); - } return Status::OK(); } Status Relation::FromProto(const table_store::schemapb::Relation* relation_pb) { @@ -195,9 +184,6 @@ Status Relation::FromProto(const table_store::schemapb::Relation* relation_pb) { auto column = relation_pb->columns(idx); AddColumn(column.column_type(), 
column.column_name(), column.column_semantic_type()); } - if (relation_pb->mutation_id().size() > 0) { - mutation_id_ = relation_pb->mutation_id(); - } return Status::OK(); } diff --git a/src/table_store/schema/relation.h b/src/table_store/schema/relation.h index 5f45cdfe9d4..f0105b65c00 100644 --- a/src/table_store/schema/relation.h +++ b/src/table_store/schema/relation.h @@ -43,7 +43,6 @@ class Relation { Relation(); // Constructor for Relation that initializes with a list of column types. explicit Relation(ColTypeArray col_types, ColNameArray col_names); - explicit Relation(ColTypeArray col_types, ColNameArray col_names, std::optional mutation_id); explicit Relation(ColTypeArray col_types, ColNameArray col_names, ColDescArray col_desc); explicit Relation(ColTypeArray col_types, ColNameArray col_names, ColSemanticTypeArray col_semantic_types); @@ -119,15 +118,12 @@ class Relation { return out << relation.DebugString(); } - std::optional mutation_id() const { return mutation_id_; } - private: ColTypeArray col_types_; ColNameArray col_names_; ColDescArray col_desc_; ColSemanticTypeArray col_semantic_types_; ColPatternTypeArray col_pattern_types_; - std::optional mutation_id_; }; } // namespace schema diff --git a/src/table_store/schemapb/schema.pb.go b/src/table_store/schemapb/schema.pb.go index f7a66f48acf..93aada07afb 100755 --- a/src/table_store/schemapb/schema.pb.go +++ b/src/table_store/schemapb/schema.pb.go @@ -486,9 +486,8 @@ func (m *RowBatchData) GetEos() bool { } type Relation struct { - Columns []*Relation_ColumnInfo `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` - Desc string `protobuf:"bytes,2,opt,name=desc,proto3" json:"desc,omitempty"` - MutationId string `protobuf:"bytes,3,opt,name=mutation_id,json=mutationId,proto3" json:"mutation_id,omitempty"` + Columns []*Relation_ColumnInfo `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"` + Desc string `protobuf:"bytes,2,opt,name=desc,proto3" json:"desc,omitempty"` } 
func (m *Relation) Reset() { *m = Relation{} } @@ -537,13 +536,6 @@ func (m *Relation) GetDesc() string { return "" } -func (m *Relation) GetMutationId() string { - if m != nil { - return m.MutationId - } - return "" -} - type Relation_ColumnInfo struct { ColumnName string `protobuf:"bytes,1,opt,name=column_name,json=columnName,proto3" json:"column_name,omitempty"` ColumnType typespb.DataType `protobuf:"varint,2,opt,name=column_type,json=columnType,proto3,enum=px.types.DataType" json:"column_type,omitempty"` @@ -742,59 +734,58 @@ func init() { } var fileDescriptor_837edaf494876c32 = []byte{ - // 825 bytes of a gzipped FileDescriptorProto + // 810 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0x77, 0xb3, 0xb1, 0xeb, 0xbc, 0xdd, 0x54, 0x65, 0x54, 0xa0, 0xe4, 0xb0, 0xa1, 0x6e, - 0x02, 0x3d, 0x20, 0x1b, 0x92, 0x2a, 0x44, 0x95, 0xe0, 0x60, 0xda, 0xc8, 0x51, 0x85, 0x85, 0xc6, - 0xe1, 0xc2, 0x01, 0x6b, 0xbc, 0x9e, 0x26, 0x2b, 0x76, 0x77, 0x56, 0x3b, 0xe3, 0x3a, 0xbe, 0x21, - 0x2e, 0x5c, 0xe1, 0x2b, 0x70, 0xe2, 0x63, 0x70, 0xe4, 0x98, 0x63, 0x85, 0x10, 0x22, 0xce, 0x85, - 0x63, 0x3f, 0x02, 0x9a, 0x37, 0xb3, 0xf1, 0x1a, 0x75, 0x49, 0x2f, 0xc9, 0xec, 0xec, 0xff, 0xfd, - 0xe6, 0xaf, 0xf7, 0x7f, 0xe3, 0x85, 0x5d, 0x59, 0x44, 0x5d, 0xc5, 0xc6, 0x09, 0x1f, 0x49, 0x25, - 0x0a, 0xde, 0x95, 0xd1, 0x19, 0x4f, 0x59, 0x3e, 0xb6, 0x8b, 0x4e, 0x5e, 0x08, 0x25, 0xc8, 0xbb, - 0xf9, 0x79, 0xa7, 0xa2, 0xea, 0x94, 0xaa, 0xad, 0xbb, 0xa7, 0xe2, 0x54, 0xa0, 0xa6, 0xab, 0x57, - 0x46, 0xbe, 0xb5, 0xa3, 0xa9, 0xf2, 0x8c, 0x15, 0x7c, 0xd2, 0x55, 0xf3, 0x9c, 0x4b, 0xf3, 0x37, - 0x1f, 0x9b, 0xff, 0x46, 0xd5, 0x7e, 0x00, 0x9b, 0x3d, 0x21, 0x12, 0xce, 0xb2, 0x2f, 0x44, 0x32, - 0x4d, 0x33, 0x42, 0x60, 0x7d, 0xc2, 0x14, 0xbb, 0xe7, 0xbe, 0xef, 0x3d, 0x6c, 0x51, 0x5c, 0xb7, - 0xef, 0x83, 0x7f, 0x9c, 0xa9, 0x83, 0x47, 0xaf, 0x91, 0x78, 0x56, 0x72, 0x00, 0x9b, 0x5f, 0x1f, - 0x67, 0xea, 0x93, 0xbd, 0x43, 
0x2b, 0xda, 0xad, 0x88, 0xfc, 0xbd, 0xb7, 0x3a, 0xda, 0x3c, 0x9e, - 0x6b, 0x65, 0xb6, 0xee, 0x01, 0x6c, 0x1e, 0x25, 0x82, 0xbd, 0x1e, 0xee, 0x5a, 0xd1, 0x0e, 0xdc, - 0x3e, 0x89, 0x53, 0x7e, 0xf0, 0x68, 0x30, 0xfc, 0x1f, 0x0b, 0xdf, 0x42, 0x30, 0x54, 0x45, 0x9c, - 0x9d, 0x5a, 0xcd, 0xa0, 0xa2, 0x09, 0x7a, 0x8f, 0xff, 0xf8, 0x6b, 0xfb, 0x20, 0x3f, 0xef, 0x4c, - 0xf8, 0x8b, 0x6e, 0x1e, 0x9f, 0xc7, 0xbc, 0x5b, 0xdb, 0x75, 0xe3, 0xd3, 0xb0, 0x9e, 0x30, 0xc5, - 0x2c, 0xff, 0x4f, 0x0f, 0x9a, 0x16, 0xfd, 0x0c, 0x82, 0xb1, 0xe9, 0xda, 0xc8, 0x1e, 0xe1, 0x3e, - 0xf4, 0xf7, 0x3e, 0xe8, 0xd4, 0x24, 0xd4, 0x59, 0x69, 0x71, 0xdf, 0xa1, 0xbe, 0xad, 0xd6, 0x74, - 0xf2, 0x14, 0x20, 0xd6, 0xdd, 0x35, 0xa8, 0x35, 0x44, 0xed, 0xd4, 0xa2, 0x2a, 0x41, 0xf4, 0x1d, - 0xba, 0x81, 0x95, 0x88, 0x79, 0x06, 0xc1, 0x34, 0xc6, 0xd6, 0x1a, 0x90, 0x77, 0x83, 0xa7, 0x95, - 0xb8, 0xb4, 0x27, 0x5b, 0x8d, 0xb0, 0x01, 0x6c, 0x2a, 0xec, 0x78, 0x26, 0x0d, 0x6d, 0x1d, 0x69, - 0x1f, 0xd6, 0xd2, 0x56, 0xf3, 0xe9, 0x3b, 0x34, 0x28, 0xeb, 0x4b, 0x73, 0xcf, 0x4d, 0xcc, 0x06, - 0xd7, 0xb8, 0xc1, 0xdc, 0xca, 0x4c, 0x68, 0x73, 0xb6, 0x1a, 0x61, 0x7d, 0xf0, 0x25, 0x86, 0x63, - 0x58, 0x4d, 0x64, 0xed, 0xd6, 0xb2, 0xaa, 0x43, 0xd1, 0x77, 0x28, 0xc8, 0xeb, 0x60, 0x7b, 0x00, - 0xad, 0x48, 0x24, 0x88, 0x69, 0xff, 0xe0, 0x42, 0x40, 0xc5, 0xac, 0xc7, 0x54, 0x74, 0x86, 0xc7, - 0xec, 0xc3, 0x7a, 0x24, 0x12, 0x69, 0x27, 0x78, 0xbb, 0x96, 0x6f, 0xc8, 0x14, 0xc5, 0xe4, 0x3d, - 0x68, 0x65, 0xd3, 0x74, 0x54, 0x88, 0x99, 0xc4, 0x28, 0x3d, 0x7a, 0x2b, 0x9b, 0xa6, 0x54, 0xcc, - 0x24, 0xb9, 0x03, 0x1e, 0x17, 0x33, 0xcc, 0xa5, 0x45, 0xf5, 0xd2, 0xec, 0x48, 0xec, 0x2d, 0xee, - 0xc8, 0xf6, 0xcf, 0x1e, 0xb4, 0x28, 0x4f, 0x98, 0x8a, 0x45, 0x46, 0x8e, 0xe0, 0x56, 0x84, 0xec, - 0xd2, 0xc3, 0x47, 0xb5, 0x1e, 0xca, 0x1a, 0x6b, 0xe6, 0x38, 0x7b, 0x2e, 0x68, 0x59, 0x8c, 0x97, - 0x85, 0xcb, 0x08, 0xfd, 0x6c, 0x50, 0x5c, 0x93, 0x6d, 0xf0, 0xd3, 0xa9, 0xc2, 0x9a, 0x51, 0x3c, - 0x41, 0x53, 0x1b, 0x14, 0xca, 0xad, 0xe3, 0xc9, 0xd6, 0x8f, 0x6b, 
0x00, 0x4b, 0x98, 0xd6, 0x1b, - 0xdc, 0x28, 0x63, 0x29, 0xc7, 0x81, 0xdf, 0xa0, 0x60, 0xb6, 0x06, 0x2c, 0xe5, 0x64, 0xff, 0x5a, - 0xa0, 0xaf, 0x0f, 0x9e, 0x75, 0x7b, 0x8f, 0x2c, 0xaf, 0xbd, 0x6e, 0xe9, 0xc9, 0x3c, 0xe7, 0x65, - 0x91, 0x5e, 0x57, 0xa8, 0x68, 0xd0, 0xab, 0x52, 0x9f, 0x68, 0x9b, 0x7d, 0xb8, 0x6b, 0x05, 0x92, - 0xa7, 0x2c, 0x53, 0x71, 0x64, 0xf0, 0xeb, 0x88, 0x7f, 0x67, 0x89, 0x1f, 0xda, 0xd7, 0x78, 0x04, - 0x31, 0x35, 0xd5, 0x3d, 0x72, 0x08, 0x41, 0xce, 0x94, 0xe2, 0x85, 0x35, 0xd8, 0x40, 0xc2, 0xdb, - 0x4b, 0xc2, 0x57, 0xe6, 0x2d, 0x02, 0xfc, 0x7c, 0xf9, 0xd0, 0xfe, 0xc5, 0x85, 0xc6, 0x89, 0x6e, - 0x3a, 0xf9, 0x0c, 0x5a, 0x85, 0x6d, 0xb4, 0xbd, 0x10, 0xf7, 0x6f, 0x4c, 0x84, 0x5e, 0x97, 0x90, - 0x23, 0xf0, 0x0b, 0x31, 0x1b, 0x8d, 0xf5, 0x84, 0x71, 0x79, 0xaf, 0x81, 0x99, 0xd6, 0xcf, 0x6d, - 0x75, 0x18, 0x29, 0x14, 0xf6, 0x89, 0x63, 0x9e, 0x18, 0x42, 0xd3, 0xe4, 0xa9, 0xd7, 0xed, 0xdf, - 0x5c, 0x68, 0x0e, 0xb1, 0x92, 0x0c, 0x21, 0x28, 0x8f, 0x1c, 0xa5, 0x2c, 0xb7, 0xb3, 0xf3, 0x71, - 0xfd, 0xfd, 0x30, 0x1f, 0x99, 0xd2, 0xf0, 0x97, 0x2c, 0x7f, 0x9a, 0xa9, 0x62, 0x4e, 0xfd, 0x62, - 0xb9, 0xb3, 0xc5, 0xe0, 0xce, 0x7f, 0x05, 0x7a, 0x7c, 0xbf, 0xe3, 0x73, 0x3b, 0x0b, 0x7a, 0x49, - 0x3e, 0x85, 0xc6, 0x0b, 0x96, 0x4c, 0xb9, 0xfd, 0x15, 0x7b, 0x83, 0xee, 0x18, 0xfd, 0xe3, 0xb5, - 0x43, 0xb7, 0xf7, 0xf9, 0xc5, 0x65, 0xe8, 0xbc, 0xbc, 0x0c, 0x9d, 0x57, 0x97, 0xa1, 0xfb, 0xfd, - 0x22, 0x74, 0x7f, 0x5d, 0x84, 0xee, 0xef, 0x8b, 0xd0, 0xbd, 0x58, 0x84, 0xee, 0xdf, 0x8b, 0xd0, - 0xfd, 0x67, 0x11, 0x3a, 0xaf, 0x16, 0xa1, 0xfb, 0xd3, 0x55, 0xe8, 0x5c, 0x5c, 0x85, 0xce, 0xcb, - 0xab, 0xd0, 0xf9, 0xa6, 0x55, 0x32, 0xc7, 0x4d, 0xfc, 0xa2, 0xed, 0xff, 0x1b, 0x00, 0x00, 0xff, - 0xff, 0x2c, 0x17, 0xd8, 0xc3, 0x4f, 0x07, 0x00, 0x00, + 0x14, 0xc7, 0x77, 0xe3, 0xd8, 0x75, 0xde, 0x6e, 0xaa, 0x32, 0x2a, 0x50, 0x7c, 0xd8, 0x50, 0x37, + 0x81, 0x1e, 0x90, 0x0d, 0x4e, 0x65, 0xa2, 0x4a, 0x70, 0x30, 0x6d, 0xe4, 0xa8, 0xc2, 0x42, 0xe3, + 0x70, 0xe1, 0x80, 0x35, 0xde, 0x4c, 0x93, 0x15, 
0xbb, 0x3b, 0xab, 0x9d, 0x71, 0x1d, 0xdf, 0x10, + 0x17, 0xae, 0x7c, 0x06, 0x4e, 0x7c, 0x0c, 0x8e, 0x1c, 0x73, 0xac, 0x10, 0x42, 0xc4, 0xb9, 0x70, + 0xcc, 0x47, 0x40, 0xf3, 0x66, 0x36, 0x5e, 0xa3, 0x2e, 0xe1, 0x92, 0x3c, 0x8f, 0xff, 0xef, 0x37, + 0x7f, 0xbd, 0xff, 0x5b, 0x2f, 0xec, 0xc9, 0x3c, 0xec, 0x2a, 0x36, 0x8d, 0xf9, 0x44, 0x2a, 0x91, + 0xf3, 0xae, 0x0c, 0xcf, 0x78, 0xc2, 0xb2, 0xa9, 0x2d, 0x3a, 0x59, 0x2e, 0x94, 0x20, 0xef, 0x66, + 0xe7, 0x9d, 0x92, 0xaa, 0x53, 0xa8, 0x5a, 0xf7, 0x4f, 0xc5, 0xa9, 0x40, 0x4d, 0x57, 0x57, 0x46, + 0xde, 0xda, 0xd5, 0x54, 0x79, 0xc6, 0x72, 0x7e, 0xd2, 0x55, 0x8b, 0x8c, 0x4b, 0xf3, 0x37, 0x9b, + 0x9a, 0xff, 0x46, 0xd5, 0x7e, 0x04, 0xdb, 0x03, 0x21, 0x62, 0xce, 0xd2, 0x2f, 0x44, 0x3c, 0x4b, + 0x52, 0x42, 0x60, 0xf3, 0x84, 0x29, 0xf6, 0xc0, 0x7d, 0xbf, 0xf6, 0xb8, 0x49, 0xb1, 0x6e, 0x3f, + 0x04, 0xef, 0x28, 0x55, 0xfd, 0x27, 0x6f, 0x90, 0xd4, 0xac, 0xa4, 0x0f, 0xdb, 0x5f, 0x1f, 0xa5, + 0xea, 0x93, 0xde, 0x81, 0x15, 0xed, 0x95, 0x44, 0x5e, 0xef, 0xad, 0x8e, 0x36, 0x8f, 0xf7, 0x5a, + 0x99, 0xed, 0x7b, 0x04, 0xdb, 0x87, 0xb1, 0x60, 0x6f, 0x86, 0xbb, 0x56, 0xb4, 0x0b, 0x77, 0x8f, + 0xa3, 0x84, 0xf7, 0x9f, 0x8c, 0xc6, 0xff, 0x61, 0xe1, 0x5b, 0xf0, 0xc7, 0x2a, 0x8f, 0xd2, 0x53, + 0xab, 0x19, 0x95, 0x34, 0xfe, 0xe0, 0xe9, 0xef, 0x7f, 0xee, 0xf4, 0xb3, 0xf3, 0xce, 0x09, 0x7f, + 0xd5, 0xcd, 0xa2, 0xf3, 0x88, 0x77, 0x2b, 0xa7, 0x6e, 0x7c, 0x1a, 0xd6, 0x33, 0xa6, 0x98, 0xe5, + 0xff, 0x51, 0x83, 0x86, 0x45, 0xbf, 0x00, 0x7f, 0x6a, 0xa6, 0x36, 0xb1, 0x57, 0xb8, 0x8f, 0xbd, + 0xde, 0x07, 0x9d, 0x8a, 0x84, 0x3a, 0x6b, 0x23, 0x1e, 0x3a, 0xd4, 0xb3, 0xdd, 0x9a, 0x4e, 0x9e, + 0x03, 0x44, 0x7a, 0xba, 0x06, 0xb5, 0x81, 0xa8, 0xdd, 0x4a, 0x54, 0x29, 0x88, 0xa1, 0x43, 0xb7, + 0xb0, 0x13, 0x31, 0x2f, 0xc0, 0x9f, 0x45, 0x38, 0x5a, 0x03, 0xaa, 0xdd, 0xe2, 0x69, 0x2d, 0x2e, + 0xed, 0xc9, 0x76, 0x23, 0x6c, 0x04, 0xdb, 0x0a, 0x27, 0x9e, 0x4a, 0x43, 0xdb, 0x44, 0xda, 0x87, + 0x95, 0xb4, 0xf5, 0x7c, 0x86, 0x0e, 0xf5, 0x8b, 0xfe, 0xc2, 0xdc, 0x4b, 0x13, 0xb3, 
0xc1, 0xd5, + 0x6f, 0x31, 0xb7, 0xb6, 0x13, 0xda, 0x9c, 0xed, 0x46, 0xd8, 0x10, 0x3c, 0x89, 0xe1, 0x18, 0x56, + 0x03, 0x59, 0x7b, 0x95, 0xac, 0xf2, 0x52, 0x0c, 0x1d, 0x0a, 0xf2, 0x26, 0xd8, 0x01, 0x40, 0x33, + 0x14, 0x31, 0x62, 0xda, 0x3f, 0xb8, 0xe0, 0x53, 0x31, 0x1f, 0x30, 0x15, 0x9e, 0xe1, 0x35, 0xfb, + 0xb0, 0x19, 0x8a, 0x58, 0xda, 0x0d, 0xde, 0xa9, 0xe4, 0x1b, 0x32, 0x45, 0x31, 0x79, 0x0f, 0x9a, + 0xe9, 0x2c, 0x99, 0xe4, 0x62, 0x2e, 0x31, 0xca, 0x1a, 0xbd, 0x93, 0xce, 0x12, 0x2a, 0xe6, 0x92, + 0xdc, 0x83, 0x1a, 0x17, 0x73, 0xcc, 0xa5, 0x49, 0x75, 0x69, 0x4e, 0x24, 0xce, 0x16, 0x4f, 0x64, + 0xfb, 0x7a, 0x03, 0x9a, 0x94, 0xc7, 0x4c, 0x45, 0x22, 0x25, 0x87, 0x70, 0x27, 0x44, 0x76, 0xe1, + 0xe1, 0xa3, 0x4a, 0x0f, 0x45, 0x8f, 0x35, 0x73, 0x94, 0xbe, 0x14, 0xb4, 0x68, 0xc6, 0x87, 0x85, + 0xcb, 0x10, 0xfd, 0x6c, 0x51, 0xac, 0x5b, 0x3f, 0x6e, 0x00, 0xac, 0xb4, 0x64, 0x07, 0x3c, 0xa3, + 0x9e, 0xa4, 0x2c, 0xe1, 0xb8, 0xcf, 0x5b, 0x14, 0xcc, 0xd1, 0x88, 0x25, 0x9c, 0xec, 0xdf, 0x08, + 0xf4, 0xd3, 0x81, 0xa8, 0xbb, 0x3d, 0xb2, 0x7a, 0xaa, 0xf5, 0xc4, 0x8e, 0x17, 0x19, 0x2f, 0x9a, + 0x74, 0x5d, 0xa2, 0xe2, 0xfd, 0xb5, 0x32, 0xf5, 0x19, 0x97, 0x21, 0x19, 0xc2, 0x7d, 0x2b, 0x90, + 0x3c, 0x61, 0xa9, 0x8a, 0x42, 0x83, 0xdf, 0x44, 0xfc, 0x3b, 0x2b, 0xfc, 0xd8, 0x7e, 0x8d, 0x57, + 0x10, 0xd3, 0x53, 0x3e, 0x23, 0x07, 0xe0, 0x67, 0x4c, 0x29, 0x9e, 0x5b, 0x83, 0x75, 0x24, 0xbc, + 0xbd, 0x22, 0x7c, 0x65, 0xbe, 0x45, 0x80, 0x97, 0xad, 0x3e, 0xb4, 0x7f, 0x76, 0xa1, 0x7e, 0xac, + 0x67, 0x4a, 0x3e, 0x83, 0x66, 0x6e, 0xe7, 0x68, 0xf7, 0xfd, 0xe1, 0xad, 0x03, 0xa7, 0x37, 0x2d, + 0xe4, 0x10, 0xbc, 0x5c, 0xcc, 0x27, 0x53, 0xbd, 0x40, 0x5c, 0x3e, 0xa8, 0x63, 0x64, 0xd5, 0x6b, + 0x59, 0xde, 0x35, 0x0a, 0xb9, 0xfd, 0xc4, 0x31, 0x2e, 0x0c, 0xa1, 0x61, 0xe2, 0xd2, 0x75, 0xfb, + 0x57, 0x17, 0x1a, 0x63, 0xec, 0x24, 0x63, 0xf0, 0x8b, 0x2b, 0x27, 0x09, 0xcb, 0xec, 0x6a, 0x7c, + 0x5c, 0xbd, 0xfe, 0xe6, 0x1d, 0x52, 0x18, 0xfe, 0x92, 0x65, 0xcf, 0x53, 0x95, 0x2f, 0xa8, 0x97, + 0xaf, 0x4e, 0x5a, 0x0c, 
0xee, 0xfd, 0x5b, 0xa0, 0xb7, 0xf3, 0x3b, 0xbe, 0xb0, 0xbb, 0xa0, 0x4b, + 0xf2, 0x29, 0xd4, 0x5f, 0xb1, 0x78, 0xc6, 0xed, 0x8f, 0xd4, 0xff, 0x98, 0x8e, 0xd1, 0x3f, 0xdd, + 0x38, 0x70, 0x07, 0x9f, 0x5f, 0x5c, 0x06, 0xce, 0xeb, 0xcb, 0xc0, 0xb9, 0xbe, 0x0c, 0xdc, 0xef, + 0x97, 0x81, 0xfb, 0xcb, 0x32, 0x70, 0x7f, 0x5b, 0x06, 0xee, 0xc5, 0x32, 0x70, 0xff, 0x5a, 0x06, + 0xee, 0xdf, 0xcb, 0xc0, 0xb9, 0x5e, 0x06, 0xee, 0x4f, 0x57, 0x81, 0x73, 0x71, 0x15, 0x38, 0xaf, + 0xaf, 0x02, 0xe7, 0x9b, 0x66, 0xc1, 0x9c, 0x36, 0xf0, 0x85, 0xb5, 0xff, 0x4f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xc4, 0x65, 0x33, 0xdc, 0x2e, 0x07, 0x00, 0x00, } func (this *BooleanColumn) Equal(that interface{}) bool { @@ -1213,9 +1204,6 @@ func (this *Relation) Equal(that interface{}) bool { if this.Desc != that1.Desc { return false } - if this.MutationId != that1.MutationId { - return false - } return true } func (this *Relation_ColumnInfo) Equal(that interface{}) bool { @@ -1459,13 +1447,12 @@ func (this *Relation) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 6) s = append(s, "&schemapb.Relation{") if this.Columns != nil { s = append(s, "Columns: "+fmt.Sprintf("%#v", this.Columns)+",\n") } s = append(s, "Desc: "+fmt.Sprintf("%#v", this.Desc)+",\n") - s = append(s, "MutationId: "+fmt.Sprintf("%#v", this.MutationId)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1996,13 +1983,6 @@ func (m *Relation) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.MutationId) > 0 { - i -= len(m.MutationId) - copy(dAtA[i:], m.MutationId) - i = encodeVarintSchema(dAtA, i, uint64(len(m.MutationId))) - i-- - dAtA[i] = 0x1a - } if len(m.Desc) > 0 { i -= len(m.Desc) copy(dAtA[i:], m.Desc) @@ -2405,10 +2385,6 @@ func (m *Relation) Size() (n int) { if l > 0 { n += 1 + l + sovSchema(uint64(l)) } - l = len(m.MutationId) - if l > 0 { - n += 1 + l + sovSchema(uint64(l)) - } return n } @@ -2654,7 +2630,6 @@ func (this *Relation) 
String() string { s := strings.Join([]string{`&Relation{`, `Columns:` + repeatedStringForColumns + `,`, `Desc:` + fmt.Sprintf("%v", this.Desc) + `,`, - `MutationId:` + fmt.Sprintf("%v", this.MutationId) + `,`, `}`, }, "") return s @@ -3861,38 +3836,6 @@ func (m *Relation) Unmarshal(dAtA []byte) error { } m.Desc = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MutationId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSchema - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSchema - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSchema - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MutationId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSchema(dAtA[iNdEx:]) diff --git a/src/table_store/schemapb/schema.proto b/src/table_store/schemapb/schema.proto index 5762aed73d2..25b11d7f6c4 100644 --- a/src/table_store/schemapb/schema.proto +++ b/src/table_store/schemapb/schema.proto @@ -90,8 +90,6 @@ message Relation { repeated ColumnInfo columns = 1; // Description of the table. string desc = 2; - // Mutation id of the table if one exists. - string mutation_id = 3; } // A table serialized as proto. 
diff --git a/src/table_store/table/internal/store_with_row_accounting.h b/src/table_store/table/internal/store_with_row_accounting.h index d26f18816fa..842f91b8b81 100644 --- a/src/table_store/table/internal/store_with_row_accounting.h +++ b/src/table_store/table/internal/store_with_row_accounting.h @@ -53,8 +53,6 @@ void constexpr_else_static_assert_false() { static_assert(always_false, "constexpr else block reached"); } -class HotOnlyStore; - /** * StoreWithRowTimeAccounting stores a deque of batches (hot or cold) and keeps track of the first * and last unique RowID's for each batch, as well as the first and last times for each batch (if @@ -77,28 +75,12 @@ class StoreWithRowTimeAccounting { StoreWithRowTimeAccounting(const schema::Relation& rel, int64_t time_col_idx) : rel_(rel), time_col_idx_(time_col_idx) {} - Status AddBatchSliceToRowBatch(const TBatch& batch, size_t row_offset, size_t batch_size, - const std::vector& cols, - schema::RowBatch* output_rb) const { - if constexpr (std::is_same_v) { - for (auto col_idx : cols) { - auto arr = batch[col_idx]->Slice(row_offset, batch_size); - PX_RETURN_IF_ERROR(output_rb->AddColumn(arr)); - } - return Status::OK(); - } else if constexpr (std::is_same_v) { - return batch.AddBatchSliceToRowBatch(row_offset, batch_size, cols, output_rb); - } else { - constexpr_else_static_assert_false(); - } - } - /** * GetNextRowBatch returns the next row batch in this store after the given unique row id. * @param last_read_row_id, pointer to the unique RowID of the last read row. The outputted batch * should include only rows with a RowID greater than this RowID. After determining the output * batch, this pointer is updated to point to the RowID of the last row in the outputted batch. - * @param hints, pointer to a BatchHints object (usually from a Cursor), that provides a + * @param hints, pointer to a BatchHints object (usually from a Table::Cursor), that provides a * hint to the store about which batch should be next. 
If the hint is correct, no searching for * the right batch is required, otherwise searching is performed as usual. This is purely an * optimization and passing a `nullptr` for hints is accepted. @@ -176,16 +158,16 @@ class StoreWithRowTimeAccounting { * PopFront removes the first batch in the store, and returns an rvalue reference to it. * @return rvalue reference to the removed batch. */ - TBatch PopFront() { + TBatch&& PopFront() { DCHECK(!batches_.empty()); first_batch_id_++; row_ids_.pop_front(); if (time_col_idx_ != -1) times_.pop_front(); - auto front = std::move(batches_.front()); + auto&& front = std::move(batches_.front()); batches_.pop_front(); - return front; + return std::move(front); } /** @@ -402,14 +384,28 @@ class StoreWithRowTimeAccounting { } } + Status AddBatchSliceToRowBatch(const TBatch& batch, size_t row_offset, size_t batch_size, + const std::vector& cols, + schema::RowBatch* output_rb) const { + if constexpr (std::is_same_v) { + for (auto col_idx : cols) { + auto arr = batch[col_idx]->Slice(row_offset, batch_size); + PX_RETURN_IF_ERROR(output_rb->AddColumn(arr)); + } + return Status::OK(); + } else if constexpr (std::is_same_v) { + return batch.AddBatchSliceToRowBatch(row_offset, batch_size, cols, output_rb); + } else { + constexpr_else_static_assert_false(); + } + } + BatchID first_batch_id_ = 0; const schema::Relation& rel_; const int64_t time_col_idx_; std::deque batches_; std::deque row_ids_; std::deque times_; - - friend HotOnlyStore; }; } // namespace internal diff --git a/src/table_store/table/table.cc b/src/table_store/table/table.cc index 6ec35efd369..a3bac23f4e5 100644 --- a/src/table_store/table/table.cc +++ b/src/table_store/table/table.cc @@ -48,13 +48,13 @@ DEFINE_int32(table_store_table_size_limit, namespace px { namespace table_store { -Cursor::Cursor(const Table* table, StartSpec start, StopSpec stop) +Table::Cursor::Cursor(const Table* table, StartSpec start, StopSpec stop) : table_(table), hints_(internal::BatchHints{}) { 
AdvanceToStart(start); StopStateFromSpec(std::move(stop)); } -void Cursor::AdvanceToStart(const StartSpec& start) { +void Table::Cursor::AdvanceToStart(const StartSpec& start) { switch (start.type) { case StartSpec::StartType::StartAtTime: { last_read_row_id_ = table_->FindRowIDFromTimeFirstGreaterThanOrEqual(start.start_time) - 1; @@ -71,7 +71,7 @@ void Cursor::AdvanceToStart(const StartSpec& start) { } } -void Cursor::UpdateStopStateForStopAtTime() { +void Table::Cursor::UpdateStopStateForStopAtTime() { if (stop_.stop_row_id_final) { // Once stop_row_id is set, we know the stop time is already within the table so we don't have // to update it anymore. @@ -85,7 +85,7 @@ void Cursor::UpdateStopStateForStopAtTime() { } } -void Cursor::StopStateFromSpec(StopSpec&& stop) { +void Table::Cursor::StopStateFromSpec(StopSpec&& stop) { stop_.spec = std::move(stop); switch (stop_.spec.type) { case StopSpec::StopType::CurrentEndOfTable: { @@ -110,7 +110,7 @@ void Cursor::StopStateFromSpec(StopSpec&& stop) { } } -bool Cursor::NextBatchReady() { +bool Table::Cursor::NextBatchReady() { switch (stop_.spec.type) { case StopSpec::StopType::StopAtTimeOrEndOfTable: case StopSpec::StopType::CurrentEndOfTable: { @@ -127,7 +127,7 @@ bool Cursor::NextBatchReady() { return false; } -bool Cursor::Done() { +bool Table::Cursor::Done() { auto next_row_id = last_read_row_id_ + 1; switch (stop_.spec.type) { case StopSpec::StopType::StopAtTimeOrEndOfTable: @@ -149,28 +149,29 @@ bool Cursor::Done() { return false; } -void Cursor::UpdateStopSpec(Cursor::StopSpec stop) { StopStateFromSpec(std::move(stop)); } +void Table::Cursor::UpdateStopSpec(Cursor::StopSpec stop) { StopStateFromSpec(std::move(stop)); } -internal::RowID* Cursor::LastReadRowID() { return &last_read_row_id_; } +internal::RowID* Table::Cursor::LastReadRowID() { return &last_read_row_id_; } -internal::BatchHints* Cursor::Hints() { return &hints_; } +internal::BatchHints* Table::Cursor::Hints() { return &hints_; } -std::optional 
Cursor::StopRowID() const { +std::optional Table::Cursor::StopRowID() const { if (stop_.spec.type == StopSpec::StopType::Infinite) { return std::nullopt; } return stop_.stop_row_id; } -StatusOr> Cursor::GetNextRowBatch( +StatusOr> Table::Cursor::GetNextRowBatch( const std::vector& cols) { return table_->GetNextRowBatch(this, cols); } -HotColdTable::HotColdTable(std::string_view table_name, const schema::Relation& relation, - size_t max_table_size, size_t compacted_batch_size) - : Table(TableMetrics(&(GetMetricsRegistry()), std::string(table_name)), relation, - max_table_size), +Table::Table(std::string_view table_name, const schema::Relation& relation, size_t max_table_size, + size_t compacted_batch_size) + : metrics_(&(GetMetricsRegistry()), std::string(table_name)), + rel_(relation), + max_table_size_(max_table_size), compacted_batch_size_(compacted_batch_size), // TODO(james): move mem_pool into constructor. compactor_(rel_, arrow::default_memory_pool()) { @@ -188,7 +189,7 @@ HotColdTable::HotColdTable(std::string_view table_name, const schema::Relation& rel_, time_col_idx_); } -Status HotColdTable::ToProto(table_store::schemapb::Table* table_proto) const { +Status Table::ToProto(table_store::schemapb::Table* table_proto) const { CHECK(table_proto != nullptr); std::vector col_selector; for (int64_t i = 0; i < static_cast(rel_.NumColumns()); i++) { @@ -208,7 +209,7 @@ Status HotColdTable::ToProto(table_store::schemapb::Table* table_proto) const { return Status::OK(); } -StatusOr> HotColdTable::GetNextRowBatch( +StatusOr> Table::GetNextRowBatch( Cursor* cursor, const std::vector& cols) const { DCHECK(!cursor->Done()) << "Calling GetNextRowBatch on an exhausted Cursor"; absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); @@ -236,7 +237,91 @@ StatusOr> HotColdTable::GetNextRowBatch( return rb; } -Table::RowID HotColdTable::FirstRowID() const { +Status Table::ExpireRowBatches(int64_t row_batch_size) { + if (row_batch_size > max_table_size_) { + return 
error::InvalidArgument("RowBatch size ($0) is bigger than maximum table size ($1).", + row_batch_size, max_table_size_); + } + int64_t bytes; + { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); + } + while (bytes + row_batch_size > max_table_size_) { + PX_RETURN_IF_ERROR(ExpireBatch()); + { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); + } + { + absl::base_internal::SpinLockHolder lock(&stats_lock_); + batches_expired_++; + metrics_.batches_expired_counter.Increment(); + } + } + return Status::OK(); +} + +Status Table::WriteRowBatch(const schema::RowBatch& rb) { + // Don't write empty row batches. + if (rb.num_columns() == 0 || rb.ColumnAt(0)->length() == 0) { + return Status::OK(); + } + + internal::RecordOrRowBatch record_or_row_batch(rb); + + PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); + return Status::OK(); +} + +Status Table::TransferRecordBatch( + std::unique_ptr record_batch) { + // Don't transfer over empty row batches. + if (record_batch->empty() || record_batch->at(0)->Size() == 0) { + return Status::OK(); + } + + auto record_batch_w_cache = internal::RecordBatchWithCache{ + std::move(record_batch), + std::vector(rel_.NumColumns()), + std::vector(rel_.NumColumns(), false), + }; + internal::RecordOrRowBatch record_or_row_batch(std::move(record_batch_w_cache)); + + PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); + return Status::OK(); +} + +Status Table::WriteHot(internal::RecordOrRowBatch&& record_or_row_batch) { + // See BatchSizeAccountantNonMutableState for an explanation of the thread safety and necessity of + // NonMutableState. 
+ auto batch_stats = internal::BatchSizeAccountant::CalcBatchStats( + ABSL_TS_UNCHECKED_READ(batch_size_accountant_)->NonMutableState(), record_or_row_batch); + + PX_RETURN_IF_ERROR(ExpireRowBatches(batch_stats.bytes)); + + { + absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); + auto batch_length = record_or_row_batch.Length(); + batch_size_accountant_->NewHotBatch(std::move(batch_stats)); + hot_store_->EmplaceBack(next_row_id_, std::move(record_or_row_batch)); + next_row_id_ += batch_length; + } + + { + absl::base_internal::SpinLockHolder lock(&stats_lock_); + ++batches_added_; + metrics_.batches_added_counter.Increment(); + bytes_added_ += batch_stats.bytes; + metrics_.bytes_added_counter.Increment(batch_stats.bytes); + } + + // Make sure locks are released for this call, since they are reacquired inside. + PX_RETURN_IF_ERROR(UpdateTableMetricGauges()); + return Status::OK(); +} + +Table::RowID Table::FirstRowID() const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); if (cold_store_->Size() > 0) { return cold_store_->FirstRowID(); @@ -248,7 +333,7 @@ Table::RowID HotColdTable::FirstRowID() const { return -1; } -Table::RowID HotColdTable::LastRowID() const { +Table::RowID Table::LastRowID() const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); if (hot_store_->Size() > 0) { @@ -260,7 +345,7 @@ Table::RowID HotColdTable::LastRowID() const { return -1; } -Table::Time HotColdTable::MaxTime() const { +Table::Time Table::MaxTime() const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); if (hot_store_->Size() > 0) { @@ -272,7 +357,7 @@ Table::Time HotColdTable::MaxTime() const { return -1; } -Table::RowID HotColdTable::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { +Table::RowID Table::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { absl::base_internal::SpinLockHolder 
cold_lock(&cold_lock_); auto optional_row_id = cold_store_->FindRowIDFromTimeFirstGreaterThanOrEqual(time); if (optional_row_id.has_value()) { @@ -286,7 +371,7 @@ Table::RowID HotColdTable::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) c return next_row_id_; } -Table::RowID HotColdTable::FindRowIDFromTimeFirstGreaterThan(Time time) const { +Table::RowID Table::FindRowIDFromTimeFirstGreaterThan(Time time) const { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); auto optional_row_id = cold_store_->FindRowIDFromTimeFirstGreaterThan(time); if (optional_row_id.has_value()) { @@ -300,7 +385,9 @@ Table::RowID HotColdTable::FindRowIDFromTimeFirstGreaterThan(Time time) const { return next_row_id_; } -TableStats HotColdTable::GetTableStats() const { +schema::Relation Table::GetRelation() const { return rel_; } + +TableStats Table::GetTableStats() const { TableStats info; int64_t min_time = -1; int64_t num_batches = 0; @@ -334,7 +421,7 @@ TableStats HotColdTable::GetTableStats() const { return info; } -Status HotColdTable::CompactSingleBatchUnlocked(arrow::MemoryPool*) { +Status Table::CompactSingleBatchUnlocked(arrow::MemoryPool*) { const auto& compaction_spec = batch_size_accountant_->GetNextCompactedBatchSpec(); PX_RETURN_IF_ERROR( @@ -369,7 +456,7 @@ Status HotColdTable::CompactSingleBatchUnlocked(arrow::MemoryPool*) { return Status::OK(); } -Status HotColdTable::CompactHotToCold(arrow::MemoryPool* mem_pool) { +Status Table::CompactHotToCold(arrow::MemoryPool* mem_pool) { bool next_ready = false; { absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); @@ -389,7 +476,7 @@ Status HotColdTable::CompactHotToCold(arrow::MemoryPool* mem_pool) { return Status::OK(); } -StatusOr HotColdTable::ExpireCold() { +StatusOr Table::ExpireCold() { absl::base_internal::SpinLockHolder cold_lock(&cold_lock_); if (cold_store_->Size() == 0) { return false; @@ -400,178 +487,31 @@ StatusOr HotColdTable::ExpireCold() { return true; } -Status HotColdTable::ExpireBatch() { - 
PX_ASSIGN_OR_RETURN(auto expired_cold, ExpireCold()); - if (expired_cold) { - return Status::OK(); - } - // If we get to this point then there were no cold batches to expire, so we try to expire a hot - // batch. - return ExpireHot(); -} - -Status HotColdTable::UpdateTableMetricGauges() { - // Update table-level gauge values. - auto stats = GetTableStats(); - // Set gauge values - metrics_.cold_bytes_gauge.Set(stats.cold_bytes); - metrics_.hot_bytes_gauge.Set(stats.hot_bytes); - metrics_.num_batches_gauge.Set(stats.num_batches); - metrics_.max_table_size_gauge.Set(stats.max_table_size); - // Compute retention gauge - int64_t current_retention_ns = 0; - // If min_time is 0, there is no data in the table. - if (stats.min_time > 0) { - int64_t current_time_ns = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); - current_retention_ns = current_time_ns - stats.min_time; - } - metrics_.retention_ns_gauge.Set(current_retention_ns); - return Status::OK(); -} - -HotOnlyTable::HotOnlyTable(std::string_view table_name, const schema::Relation& relation, - size_t max_table_size) - : Table(TableMetrics(&(GetMetricsRegistry()), std::string(table_name)), relation, - max_table_size) { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - for (const auto& [i, col_name] : Enumerate(rel_.col_names())) { - if (col_name == "time_" && rel_.GetColumnType(i) == types::DataType::TIME64NS) { - time_col_idx_ = i; - } - } - batch_size_accountant_ = - internal::BatchSizeAccountant::Create(rel_, FLAGS_table_store_table_size_limit); - // TODO(ddelnano): Move this into the base class constructor - hot_store_ = std::make_unique>( - rel_, time_col_idx_); -} - -StatusOr> HotOnlyTable::GetNextRowBatch( - Cursor* /*cursor*/, const std::vector& cols) const { - std::vector col_types; - for (int64_t col_idx : cols) { - DCHECK(static_cast(col_idx) < rel_.NumColumns()); - col_types.push_back(rel_.col_types()[col_idx]); - } - const auto row_desc = 
schema::RowDescriptor(col_types); +Status Table::ExpireHot() { absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); if (hot_store_->Size() == 0) { - return schema::RowBatch::WithZeroRows(row_desc, /* eow */ true, - /* eos */ true); + return error::InvalidArgument("Failed to expire row batch, no row batches in table"); } - auto&& batch = hot_store_->PopFront(); - auto batch_size = batch.Length(); - auto rb = std::make_unique(row_desc, batch_size); + hot_store_->PopFront(); batch_size_accountant_->ExpireHotBatch(); - PX_RETURN_IF_ERROR(hot_store_->AddBatchSliceToRowBatch(batch, 0, batch_size, cols, rb.get())); - return rb; -} - -Table::RowID HotOnlyTable::FirstRowID() const { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - if (hot_store_->Size() > 0) { - return hot_store_->FirstRowID(); - } - return -1; -} - -Table::RowID HotOnlyTable::LastRowID() const { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - if (hot_store_->Size() > 0) { - return hot_store_->LastRowID(); - } - return -1; -} - -Table::RowID HotOnlyTable::FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - auto optional_row_id = hot_store_->FindRowIDFromTimeFirstGreaterThanOrEqual(time); - if (optional_row_id.has_value()) { - return optional_row_id.value(); - } - return next_row_id_; -} - -Table::RowID HotOnlyTable::FindRowIDFromTimeFirstGreaterThan(Time time) const { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - auto optional_row_id = hot_store_->FindRowIDFromTimeFirstGreaterThan(time); - if (optional_row_id.has_value()) { - return optional_row_id.value(); - } - return next_row_id_; -} - -Status HotOnlyTable::ToProto(table_store::schemapb::Table* table_proto) const { - CHECK(table_proto != nullptr); - std::vector col_selector; - for (int64_t i = 0; i < static_cast(rel_.NumColumns()); i++) { - col_selector.push_back(i); - } - - Cursor cursor(this); - while (!cursor.Done()) { - 
PX_ASSIGN_OR_RETURN(auto cur_rb, cursor.GetNextRowBatch(col_selector)); - auto eos = cursor.Done(); - cur_rb->set_eow(eos); - cur_rb->set_eos(eos); - PX_RETURN_IF_ERROR(cur_rb->ToProto(table_proto->add_row_batches())); - } - - PX_RETURN_IF_ERROR(rel_.ToProto(table_proto->mutable_relation())); - return Status::OK(); -} - -TableStats HotOnlyTable::GetTableStats() const { - TableStats info; - int64_t min_time = -1; - int64_t num_batches = 0; - int64_t hot_bytes = 0; - int64_t cold_bytes = 0; - { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - num_batches += hot_store_->Size(); - hot_bytes = batch_size_accountant_->HotBytes(); - if (min_time == -1) { - min_time = hot_store_->MinTime(); - } - } - absl::base_internal::SpinLockHolder lock(&stats_lock_); - - info.batches_added = batches_added_; - info.batches_expired = batches_expired_; - info.bytes_added = bytes_added_; - info.num_batches = num_batches; - info.bytes = hot_bytes + cold_bytes; - info.hot_bytes = hot_bytes; - info.cold_bytes = cold_bytes; - info.compacted_batches = compacted_batches_; - info.max_table_size = max_table_size_; - info.min_time = min_time; - - return info; -} - -Status HotOnlyTable::CompactHotToCold(arrow::MemoryPool* /*mem_pool*/) { - LOG(INFO) << "Skipping compaction for HotOnlyTable"; return Status::OK(); } -Table::Time HotOnlyTable::MaxTime() const { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - if (hot_store_->Size() > 0) { - return hot_store_->MaxTime(); +Status Table::ExpireBatch() { + PX_ASSIGN_OR_RETURN(auto expired_cold, ExpireCold()); + if (expired_cold) { + return Status::OK(); } - return -1; + // If we get to this point then there were no cold batches to expire, so we try to expire a hot + // batch. + return ExpireHot(); } -Status HotOnlyTable::ExpireBatch() { return ExpireHot(); } - -Status HotOnlyTable::UpdateTableMetricGauges() { +Status Table::UpdateTableMetricGauges() { // Update table-level gauge values. 
auto stats = GetTableStats(); // Set gauge values + metrics_.cold_bytes_gauge.Set(stats.cold_bytes); metrics_.hot_bytes_gauge.Set(stats.hot_bytes); metrics_.num_batches_gauge.Set(stats.num_batches); metrics_.max_table_size_gauge.Set(stats.max_table_size); diff --git a/src/table_store/table/table.h b/src/table_store/table/table.h index a26accd562c..c82e9b4e6d8 100644 --- a/src/table_store/table/table.h +++ b/src/table_store/table/table.h @@ -68,316 +68,6 @@ struct TableStats { int64_t min_time; }; -class Table; - -/** - * Cursor allows iterating the table, while guaranteeing that no row is returned twice (even when - * compactions occur between accesses). {Start,Stop}Spec specify what rows the cursor should begin - * and end at when iterating the cursor. - */ -class Cursor { - using Time = internal::Time; - using RowID = internal::RowID; - - public: - /** - * StartSpec defines where a Cursor should begin within the table. Current options are to start - * at a given time, or start at the first row currently in the table. - */ - struct StartSpec { - enum StartType { - StartAtTime, - CurrentStartOfTable, - }; - StartType type = CurrentStartOfTable; - Time start_time = -1; - }; - - /** - * StopSpec defines when a Cursor should stop and be considered exhausted. Current options are - * to stop at a given time, stop at the last row currently in the table, or infinite (i.e. the - * Cursor never becomes exhausted). - */ - struct StopSpec { - enum StopType { - // Iterating a StopAtTime cursor will return all records with `timestamp <= stop_time`. - // The cursor will not be considered `Done()` until a record with `timestamp > stop_time` is - // added to the table. 
- // Note that StopAtTime is the most expensive of the StopTypes because it requires holding a - // table lock very briefly on each call to `Done()` or `NextBatchReady()` - StopAtTime, - // Iterating a StopAtTimeOrEndOfTable cursor will return all records with `timestamp <= - // stop_time` that existed in the table at the time of cursor creation. The cursor will be - // considered `Done()` once all records with `timestamp <= stop_time` have been consumed or - // when the end of the table is reached (end of the table is determined at cursor creation - // time). - StopAtTimeOrEndOfTable, - // Iterating a CurrentEndOfTable cursor will return all records in the table at cursor - // creation time. - CurrentEndOfTable, - // An Infinite cursor will never be considered `Done()`. - Infinite, - }; - StopType type = CurrentEndOfTable; - // Only valid for StopAtTime or StopAtTimeOrEndOfTable types. - Time stop_time = -1; - }; - - explicit Cursor(const Table* table) : Cursor(table, StartSpec{}, StopSpec{}) {} - Cursor(const Table* table, StartSpec start, StopSpec stop); - - // In the case of StopType == Infinite or StopType == StopAtTime, this returns whether the table - // has the next batch ready. In the case of StopType == CurrentEndOfTable, this returns !Done(). - // Note that `NextBatchReady() == true` doesn't guarantee that `GetNextRowBatch` will succeed. - // For instance, the desired row batch could have been expired between the call to - // `NextBatchReady()` and `GetNextRowBatch(...)`, and then the row batch after the expired one - // is past the stopping condition. In this case `GetNextRowBatch(...)` will return an error. - bool NextBatchReady(); - StatusOr> GetNextRowBatch(const std::vector& cols); - // In the case of StopType == Infinite, this function always returns false. - bool Done(); - // Change the StopSpec of the cursor. 
- void UpdateStopSpec(StopSpec stop); - - private: - void AdvanceToStart(const StartSpec& start); - void StopStateFromSpec(StopSpec&& stop); - void UpdateStopStateForStopAtTime(); - - // The following methods are made private so that they are only accessible from Table. - internal::RowID* LastReadRowID(); - internal::BatchHints* Hints(); - std::optional StopRowID() const; - - struct StopState { - StopSpec spec; - RowID stop_row_id; - // If StopSpec.type is StopAtTime, then stop_row_id doesn't become finalized until the time is - // within the table. This bool keeps track of when that happens. - bool stop_row_id_final = false; - }; - const Table* table_; - internal::BatchHints hints_; - RowID last_read_row_id_; - StopState stop_; - - friend class Table; - friend class HotColdTable; - friend class HotOnlyTable; -}; - -class Table : public NotCopyable { - public: - using RecordBatchPtr = internal::RecordBatchPtr; - using ArrowArrayPtr = internal::ArrowArrayPtr; - using ColdBatch = internal::ColdBatch; - using Time = internal::Time; - using TimeInterval = internal::TimeInterval; - using RowID = internal::RowID; - using RowIDInterval = internal::RowIDInterval; - using BatchID = internal::BatchID; - - Table() = delete; - virtual ~Table() = default; - - schema::Relation GetRelation() const { return rel_; } - - /** - * Get a RowBatch of data corresponding to the next data after the given cursor. - * @param cursor the Cursor to get the next row batch after. - * @param cols a vector of column indices to get data for. - * @return a unique ptr to a RowBatch with the requested data. - */ - virtual StatusOr> GetNextRowBatch( - Cursor* cursor, const std::vector& cols) const = 0; - - /** - * Get the unique identifier of the first row in the table. - * If all the data is expired from the table, this returns the last row id that was in the table. - * @return unique identifier of the first row. 
- */ - virtual RowID FirstRowID() const = 0; - - /** - * Get the unique identifier of the last row in the table. - * If all the data is expired from the table, this returns the last row id that was in the table. - * @return unique identifier of the last row. - */ - virtual RowID LastRowID() const = 0; - - /** - * Find the unique identifier of the first row for which its corresponding time is greater than or - * equal to the given time. - * @param time the time to search for. - * @return unique identifier of the first row with time greater than or equal to the given time. - */ - virtual RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const = 0; - - /** - * Find the unique identifier of the first row for which its corresponding time is greater than - * the given time. - * @param time the time to search for. - * @return unique identifier of the first row with time greater than the given time. - */ - virtual RowID FindRowIDFromTimeFirstGreaterThan(Time time) const = 0; - - /** - * Covert the table and store in passed in proto. - * @param table_proto The table proto to write to. - * @return Status of conversion. - */ - virtual Status ToProto(table_store::schemapb::Table* table_proto) const = 0; - - virtual TableStats GetTableStats() const = 0; - - /** - * Compacts hot batches into compacted_batch_size_ sized cold batches. Each call to - * CompactHotToCold will create a maximum of kMaxBatchesPerCompactionCall cold batches. - * @param mem_pool arrow MemoryPool to be used for creating new cold batches. - */ - virtual Status CompactHotToCold(arrow::MemoryPool* mem_pool) = 0; - - /** - * Transfers the given record batch (from Stirling) into the Table. - * - * @param record_batch the record batch to be appended to the Table. - * @return status - */ - Status TransferRecordBatch(std::unique_ptr record_batch) { - // Don't transfer over empty row batches. 
- if (record_batch->empty() || record_batch->at(0)->Size() == 0) { - return Status::OK(); - } - - auto record_batch_w_cache = internal::RecordBatchWithCache{ - std::move(record_batch), - std::vector(rel_.NumColumns()), - std::vector(rel_.NumColumns(), false), - }; - internal::RecordOrRowBatch record_or_row_batch(std::move(record_batch_w_cache)); - - PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); - return Status::OK(); - } - - /** - * Writes a row batch to the table. - * @param rb Rowbatch to write to the table. - */ - Status WriteRowBatch(const schema::RowBatch& rb) { - // Don't write empty row batches. - if (rb.num_columns() == 0 || rb.ColumnAt(0)->length() == 0) { - return Status::OK(); - } - - internal::RecordOrRowBatch record_or_row_batch(rb); - - PX_RETURN_IF_ERROR(WriteHot(std::move(record_or_row_batch))); - return Status::OK(); - } - - protected: - virtual Time MaxTime() const = 0; - - virtual Status ExpireBatch() = 0; - - virtual Status UpdateTableMetricGauges() = 0; - - Table(TableMetrics metrics, const schema::Relation& relation, size_t max_table_size) - : metrics_(metrics), rel_(relation), max_table_size_(max_table_size) {} - - Status ExpireHot() { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - if (hot_store_->Size() == 0) { - return error::InvalidArgument("Failed to expire row batch, no row batches in table"); - } - hot_store_->PopFront(); - batch_size_accountant_->ExpireHotBatch(); - return Status::OK(); - } - - Status ExpireRowBatches(int64_t row_batch_size) { - if (row_batch_size > max_table_size_) { - return error::InvalidArgument("RowBatch size ($0) is bigger than maximum table size ($1).", - row_batch_size, max_table_size_); - } - int64_t bytes; - { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); - } - while (bytes + row_batch_size > max_table_size_) { - PX_RETURN_IF_ERROR(ExpireBatch()); - { - 
absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - bytes = batch_size_accountant_->HotBytes() + batch_size_accountant_->ColdBytes(); - } - { - absl::base_internal::SpinLockHolder lock(&stats_lock_); - batches_expired_++; - metrics_.batches_expired_counter.Increment(); - } - } - return Status::OK(); - } - - Status WriteHot(internal::RecordOrRowBatch&& record_or_row_batch) { - // See BatchSizeAccountantNonMutableState for an explanation of the thread safety and necessity - // of NonMutableState. - auto batch_stats = internal::BatchSizeAccountant::CalcBatchStats( - ABSL_TS_UNCHECKED_READ(batch_size_accountant_)->NonMutableState(), record_or_row_batch); - - PX_RETURN_IF_ERROR(ExpireRowBatches(batch_stats.bytes)); - - { - absl::base_internal::SpinLockHolder hot_lock(&hot_lock_); - auto batch_length = record_or_row_batch.Length(); - batch_size_accountant_->NewHotBatch(std::move(batch_stats)); - hot_store_->EmplaceBack(next_row_id_, std::move(record_or_row_batch)); - next_row_id_ += batch_length; - } - - { - absl::base_internal::SpinLockHolder lock(&stats_lock_); - ++batches_added_; - metrics_.batches_added_counter.Increment(); - bytes_added_ += batch_stats.bytes; - metrics_.bytes_added_counter.Increment(batch_stats.bytes); - } - - // Make sure locks are released for this call, since they are reacquired inside. - PX_RETURN_IF_ERROR(UpdateTableMetricGauges()); - return Status::OK(); - } - - mutable absl::base_internal::SpinLock hot_lock_; - - TableMetrics metrics_; - - schema::Relation rel_; - - int64_t max_table_size_ = 0; - - int64_t time_col_idx_ = -1; - - mutable absl::base_internal::SpinLock stats_lock_; - int64_t batches_expired_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t batches_added_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t bytes_added_ ABSL_GUARDED_BY(stats_lock_) = 0; - int64_t compacted_batches_ ABSL_GUARDED_BY(stats_lock_) = 0; - - std::unique_ptr> hot_store_ - ABSL_GUARDED_BY(hot_lock_); - - // Counter to assign a unique row ID to each row. 
Synchronized by hot_lock_ since its only - // accessed on a hot write. - int64_t next_row_id_ ABSL_GUARDED_BY(hot_lock_) = 0; - - std::unique_ptr batch_size_accountant_ ABSL_GUARDED_BY(hot_lock_); - - friend class Cursor; -}; - /** * Table stores data in two separate partitions, hot and cold. Hot data is "hot" from the * perspective of writes, in other words data is first written to the hot partitiion, and then later @@ -411,7 +101,7 @@ class Table : public NotCopyable { * that when GetNextRowBatch is called on the cursor it can work out that it needs to return a slice * of the batch with the original "second" batch's data. */ -class HotColdTable : public Table { +class Table : public NotCopyable { using RecordBatchPtr = internal::RecordBatchPtr; using ArrowArrayPtr = internal::ArrowArrayPtr; using ColdBatch = internal::ColdBatch; @@ -421,9 +111,6 @@ class HotColdTable : public Table { using RowIDInterval = internal::RowIDInterval; using BatchID = internal::BatchID; - // TODO(ddelnano): Maybe this should be removed - friend class Cursor; - static inline constexpr int64_t kDefaultColdBatchMinSize = 64 * 1024; public: @@ -433,9 +120,100 @@ class HotColdTable : public Table { const schema::Relation& relation) { // Create naked pointer, because std::make_shared() cannot access the private ctor. return std::shared_ptr
( - new HotColdTable(table_name, relation, FLAGS_table_store_table_size_limit)); + new Table(table_name, relation, FLAGS_table_store_table_size_limit)); } + /** + * Cursor allows iterating the table, while guaranteeing that no row is returned twice (even when + * compactions occur between accesses). {Start,Stop}Spec specify what rows the cursor should begin + * and end at when iterating the cursor. + */ + class Cursor { + public: + /** + * StartSpec defines where a Cursor should begin within the table. Current options are to start + * at a given time, or start at the first row currently in the table. + */ + struct StartSpec { + enum StartType { + StartAtTime, + CurrentStartOfTable, + }; + StartType type = CurrentStartOfTable; + Time start_time = -1; + }; + + /** + * StopSpec defines when a Cursor should stop and be considered exhausted. Current options are + * to stop at a given time, stop at the last row currently in the table, or infinite (i.e. the + * Cursor never becomes exhausted). + */ + struct StopSpec { + enum StopType { + // Iterating a StopAtTime cursor will return all records with `timestamp <= stop_time`. + // The cursor will not be considered `Done()` until a record with `timestamp > stop_time` is + // added to the table. + // Note that StopAtTime is the most expensive of the StopTypes because it requires holding a + // table lock very briefly on each call to `Done()` or `NextBatchReady()` + StopAtTime, + // Iterating a StopAtTimeOrEndOfTable cursor will return all records with `timestamp <= + // stop_time` that existed in the table at the time of cursor creation. The cursor will be + // considered `Done()` once all records with `timestamp <= stop_time` have been consumed or + // when the end of the table is reached (end of the table is determined at cursor creation + // time). + StopAtTimeOrEndOfTable, + // Iterating a CurrentEndOfTable cursor will return all records in the table at cursor + // creation time. 
+ CurrentEndOfTable, + // An Infinite cursor will never be considered `Done()`. + Infinite, + }; + StopType type = CurrentEndOfTable; + // Only valid for StopAtTime or StopAtTimeOrEndOfTable types. + Time stop_time = -1; + }; + + explicit Cursor(const Table* table) : Cursor(table, StartSpec{}, StopSpec{}) {} + Cursor(const Table* table, StartSpec start, StopSpec stop); + + // In the case of StopType == Infinite or StopType == StopAtTime, this returns whether the table + // has the next batch ready. In the case of StopType == CurrentEndOfTable, this returns !Done(). + // Note that `NextBatchReady() == true` doesn't guarantee that `GetNextRowBatch` will succeed. + // For instance, the desired row batch could have been expired between the call to + // `NextBatchReady()` and `GetNextRowBatch(...)`, and then the row batch after the expired one + // is past the stopping condition. In this case `GetNextRowBatch(...)` will return an error. + bool NextBatchReady(); + StatusOr> GetNextRowBatch(const std::vector& cols); + // In the case of StopType == Infinite, this function always returns false. + bool Done(); + // Change the StopSpec of the cursor. + void UpdateStopSpec(StopSpec stop); + + private: + void AdvanceToStart(const StartSpec& start); + void StopStateFromSpec(StopSpec&& stop); + void UpdateStopStateForStopAtTime(); + + // The following methods are made private so that they are only accessible from Table. + internal::RowID* LastReadRowID(); + internal::BatchHints* Hints(); + std::optional StopRowID() const; + + struct StopState { + StopSpec spec; + RowID stop_row_id; + // If StopSpec.type is StopAtTime, then stop_row_id doesn't become finalized until the time is + // within the table. This bool keeps track of when that happens. + bool stop_row_id_final = false; + }; + const Table* table_; + internal::BatchHints hints_; + RowID last_read_row_id_; + StopState stop_; + + friend class Table; + }; + /** * @brief Construct a new Table object along with its columns. 
Can be used to create * a table (along with columns) based on a subscription message from Stirling. @@ -444,35 +222,35 @@ class HotColdTable : public Table { * @param max_table_size the maximum number of bytes that the table can hold. This is limitless * (-1) by default. */ - explicit HotColdTable(std::string_view table_name, const schema::Relation& relation, - size_t max_table_size) - : HotColdTable(table_name, relation, max_table_size, kDefaultColdBatchMinSize) {} + explicit Table(std::string_view table_name, const schema::Relation& relation, + size_t max_table_size) + : Table(table_name, relation, max_table_size, kDefaultColdBatchMinSize) {} - HotColdTable(std::string_view table_name, const schema::Relation& relation, size_t max_table_size, - size_t compacted_batch_size_); + Table(std::string_view table_name, const schema::Relation& relation, size_t max_table_size, + size_t compacted_batch_size_); /** * Get a RowBatch of data corresponding to the next data after the given cursor. - * @param cursor the Cursor to get the next row batch after. + * @param cursor the Table::Cursor to get the next row batch after. * @param cols a vector of column indices to get data for. * @return a unique ptr to a RowBatch with the requested data. */ StatusOr> GetNextRowBatch( - Cursor* cursor, const std::vector& cols) const override; + Cursor* cursor, const std::vector& cols) const; /** * Get the unique identifier of the first row in the table. * If all the data is expired from the table, this returns the last row id that was in the table. * @return unique identifier of the first row. */ - RowID FirstRowID() const override; + RowID FirstRowID() const; /** * Get the unique identifier of the last row in the table. * If all the data is expired from the table, this returns the last row id that was in the table. * @return unique identifier of the last row. 
*/ - RowID LastRowID() const override; + RowID LastRowID() const; /** * Find the unique identifier of the first row for which its corresponding time is greater than or @@ -480,7 +258,7 @@ class HotColdTable : public Table { * @param time the time to search for. * @return unique identifier of the first row with time greater than or equal to the given time. */ - RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const override; + RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const; /** * Find the unique identifier of the first row for which its corresponding time is greater than @@ -488,7 +266,13 @@ class HotColdTable : public Table { * @param time the time to search for. * @return unique identifier of the first row with time greater than the given time. */ - RowID FindRowIDFromTimeFirstGreaterThan(Time time) const override; + RowID FindRowIDFromTimeFirstGreaterThan(Time time) const; + + /** + * Writes a row batch to the table. + * @param rb Rowbatch to write to the table. + */ + Status WriteRowBatch(const schema::RowBatch& rb); /** * Transfers the given record batch (from Stirling) into the Table. @@ -496,139 +280,68 @@ class HotColdTable : public Table { * @param record_batch the record batch to be appended to the Table. * @return status */ - /* Status TransferRecordBatch(std::unique_ptr record_batch) - * override; */ + Status TransferRecordBatch(std::unique_ptr record_batch); + + schema::Relation GetRelation() const; + StatusOr> GetTableAsRecordBatches() const; /** * Covert the table and store in passed in proto. * @param table_proto The table proto to write to. * @return Status of conversion. */ - Status ToProto(table_store::schemapb::Table* table_proto) const override; + Status ToProto(table_store::schemapb::Table* table_proto) const; - TableStats GetTableStats() const override; + TableStats GetTableStats() const; /** * Compacts hot batches into compacted_batch_size_ sized cold batches. 
Each call to * CompactHotToCold will create a maximum of kMaxBatchesPerCompactionCall cold batches. * @param mem_pool arrow MemoryPool to be used for creating new cold batches. */ - Status CompactHotToCold(arrow::MemoryPool* mem_pool) override; + Status CompactHotToCold(arrow::MemoryPool* mem_pool); private: - Time MaxTime() const override; - - Status ExpireBatch() override; + TableMetrics metrics_; - Status UpdateTableMetricGauges() override; + schema::Relation rel_; + mutable absl::base_internal::SpinLock stats_lock_; + int64_t batches_expired_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t batches_added_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t bytes_added_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t compacted_batches_ ABSL_GUARDED_BY(stats_lock_) = 0; + int64_t max_table_size_ = 0; const int64_t compacted_batch_size_; + mutable absl::base_internal::SpinLock hot_lock_; + std::unique_ptr> hot_store_ + ABSL_GUARDED_BY(hot_lock_); mutable absl::base_internal::SpinLock cold_lock_; std::unique_ptr> cold_store_ ABSL_GUARDED_BY(cold_lock_); std::deque cold_batch_bytes_ ABSL_GUARDED_BY(cold_lock_); + // Counter to assign a unique row ID to each row. Synchronized by hot_lock_ since its only + // accessed on a hot write. + int64_t next_row_id_ ABSL_GUARDED_BY(hot_lock_) = 0; + int64_t time_col_idx_ = -1; + + Status WriteHot(internal::RecordOrRowBatch&& record_or_row_batch); + + Status ExpireBatch(); + Status ExpireHot(); StatusOr ExpireCold(); + Status ExpireRowBatches(int64_t row_batch_size); Status CompactSingleBatchUnlocked(arrow::MemoryPool* mem_pool) ABSL_EXCLUSIVE_LOCKS_REQUIRED(cold_lock_) ABSL_EXCLUSIVE_LOCKS_REQUIRED(hot_lock_); + Status UpdateTableMetricGauges(); - internal::ArrowArrayCompactor compactor_; -}; - -class HotOnlyTable : public Table { - using RowID = internal::RowID; + Time MaxTime() const; - public: - using StopPosition = int64_t; - static inline std::shared_ptr
Create(std::string_view table_name, - const schema::Relation& relation) { - // Create naked pointer, because std::make_shared() cannot access the private ctor. - return std::shared_ptr
( - new HotOnlyTable(table_name, relation, FLAGS_table_store_table_size_limit)); - } - - /** - * @brief Construct a new Table object along with its columns. Can be used to create - * a table (along with columns) based on a subscription message from Stirling. - * - * @param relation the relation for the table. - * @param max_table_size the maximum number of bytes that the table can hold. This is limitless - * (-1) by default. - */ - explicit HotOnlyTable(std::string_view table_name, const schema::Relation& relation, - size_t max_table_size); - - /** - * Get a RowBatch of data corresponding to the next data after the given cursor. - * @param cursor the Cursor to get the next row batch after. - * @param cols a vector of column indices to get data for. - * @return a unique ptr to a RowBatch with the requested data. - */ - StatusOr> GetNextRowBatch( - Cursor* cursor, const std::vector& cols) const override; - - /** - * Get the unique identifier of the first row in the table. - * If all the data is expired from the table, this returns the last row id that was in the table. - * @return unique identifier of the first row. - */ - RowID FirstRowID() const override; - - /** - * Get the unique identifier of the last row in the table. - * If all the data is expired from the table, this returns the last row id that was in the table. - * @return unique identifier of the last row. - */ - RowID LastRowID() const override; - - /** - * Find the unique identifier of the first row for which its corresponding time is greater than or - * equal to the given time. - * @param time the time to search for. - * @return unique identifier of the first row with time greater than or equal to the given time. - */ - RowID FindRowIDFromTimeFirstGreaterThanOrEqual(Time time) const override; - - /** - * Find the unique identifier of the first row for which its corresponding time is greater than - * the given time. - * @param time the time to search for. 
- * @return unique identifier of the first row with time greater than the given time. - */ - RowID FindRowIDFromTimeFirstGreaterThan(Time time) const override; - - /** - * Transfers the given record batch (from Stirling) into the Table. - * - * @param record_batch the record batch to be appended to the Table. - * @return status - */ - /* Status TransferRecordBatch(std::unique_ptr record_batch) - * override; */ - - /** - * Covert the table and store in passed in proto. - * @param table_proto The table proto to write to. - * @return Status of conversion. - */ - Status ToProto(table_store::schemapb::Table* table_proto) const override; - - TableStats GetTableStats() const override; - - /** - * Compacts hot batches into compacted_batch_size_ sized cold batches. Each call to - * CompactHotToCold will create a maximum of kMaxBatchesPerCompactionCall cold batches. - * @param mem_pool arrow MemoryPool to be used for creating new cold batches. - */ - Status CompactHotToCold(arrow::MemoryPool* mem_pool) override; - - private: - Time MaxTime() const override; - - Status ExpireBatch() override; + std::unique_ptr batch_size_accountant_ ABSL_GUARDED_BY(hot_lock_); - Status UpdateTableMetricGauges() override; + internal::ArrowArrayCompactor compactor_; friend class Cursor; }; diff --git a/src/table_store/table/table_benchmark.cc b/src/table_store/table/table_benchmark.cc index 22eb1357a16..8c65271fe2e 100644 --- a/src/table_store/table/table_benchmark.cc +++ b/src/table_store/table/table_benchmark.cc @@ -34,7 +34,7 @@ static inline std::unique_ptr
MakeTable(int64_t max_size, int64_t compact schema::Relation rel( std::vector({types::DataType::TIME64NS, types::DataType::FLOAT64}), std::vector({"time_", "float"})); - return std::make_unique("test_table", rel, max_size, compaction_size); + return std::make_unique
("test_table", rel, max_size, compaction_size); } static inline std::unique_ptr MakeHotBatch(int64_t batch_size, @@ -82,7 +82,7 @@ static inline int64_t FillTableCold(Table* table, int64_t table_size, int64_t ba return time_counter; } -static inline void ReadFullTable(Cursor* cursor) { +static inline void ReadFullTable(Table::Cursor* cursor) { while (!cursor->Done()) { benchmark::DoNotOptimize(cursor->GetNextRowBatch({0, 1})); } @@ -98,14 +98,14 @@ static void BM_TableReadAllHot(benchmark::State& state) { CHECK_EQ(table->GetTableStats().bytes, table_size); - Cursor cursor(table.get()); + Table::Cursor cursor(table.get()); for (auto _ : state) { ReadFullTable(&cursor); state.PauseTiming(); table = MakeTable(table_size, compaction_size); FillTableHot(table.get(), table_size, batch_length); - cursor = Cursor(table.get()); + cursor = Table::Cursor(table.get()); state.ResumeTiming(); } @@ -120,25 +120,25 @@ static void BM_TableReadAllCold(benchmark::State& state) { auto table = MakeTable(table_size, compaction_size); FillTableCold(table.get(), table_size, batch_length); CHECK_EQ(table->GetTableStats().bytes, table_size); - Cursor cursor(table.get()); + Table::Cursor cursor(table.get()); for (auto _ : state) { ReadFullTable(&cursor); state.PauseTiming(); - cursor = Cursor(table.get()); + cursor = Table::Cursor(table.get()); state.ResumeTiming(); } state.SetBytesProcessed(state.iterations() * table_size); } -Cursor GetLastBatchCursor(Table* table, int64_t last_time, int64_t batch_length, - const std::vector& cols) { - Cursor cursor( - table, - Cursor::StartSpec{Cursor::StartSpec::StartType::StartAtTime, last_time - 2 * batch_length}, - Cursor::StopSpec{}); +Table::Cursor GetLastBatchCursor(Table* table, int64_t last_time, int64_t batch_length, + const std::vector& cols) { + Table::Cursor cursor(table, + Table::Cursor::StartSpec{Table::Cursor::StartSpec::StartType::StartAtTime, + last_time - 2 * batch_length}, + Table::Cursor::StopSpec{}); // Advance the cursor so that it 
points to the last batch and has BatchHints set. cursor.GetNextRowBatch(cols); return cursor; @@ -238,7 +238,7 @@ static void BM_TableWriteFull(benchmark::State& state) { // NOLINTNEXTLINE : runtime/references. static void BM_TableCompaction(benchmark::State& state) { int64_t compaction_size = 64 * 1024; - int64_t table_size = HotColdTable::kMaxBatchesPerCompactionCall * compaction_size; + int64_t table_size = Table::kMaxBatchesPerCompactionCall * compaction_size; int64_t batch_length = 256; auto table = MakeTable(table_size, compaction_size); // Fill table first to make sure each compaction hits kMaxBatchesPerCompaction. @@ -254,7 +254,7 @@ static void BM_TableCompaction(benchmark::State& state) { } state.SetBytesProcessed(state.iterations() * compaction_size * - HotColdTable::kMaxBatchesPerCompactionCall); + Table::kMaxBatchesPerCompactionCall); } // NOLINTNEXTLINE : runtime/references. @@ -262,7 +262,7 @@ static void BM_TableThreaded(benchmark::State& state) { schema::Relation rel({types::DataType::TIME64NS}, {"time_"}); schema::RowDescriptor rd({types::DataType::TIME64NS}); std::shared_ptr
table_ptr = - std::make_shared("test_table", rel, 16 * 1024 * 1024, 5 * 1024); + std::make_shared
("test_table", rel, 16 * 1024 * 1024, 5 * 1024); int64_t batch_size = 1024; int64_t num_batches = 16 * 1024; @@ -309,7 +309,7 @@ static void BM_TableThreaded(benchmark::State& state) { int64_t batch_counter = 0; while (batch_counter < (num_batches / num_read_threads)) { - Cursor cursor(table_ptr.get()); + Table::Cursor cursor(table_ptr.get()); auto start = std::chrono::high_resolution_clock::now(); auto batch_or_s = cursor.GetNextRowBatch({0}); auto end = std::chrono::high_resolution_clock::now(); diff --git a/src/table_store/table/table_store.cc b/src/table_store/table/table_store.cc index 3bcdacbc69d..e7ed6319b87 100644 --- a/src/table_store/table/table_store.cc +++ b/src/table_store/table/table_store.cc @@ -43,7 +43,7 @@ StatusOr TableStore::CreateNewTablet(uint64_t table_id, const types::Tab const TableInfo& table_info = id_to_table_info_map_iter->second; const schema::Relation& relation = table_info.relation; - std::shared_ptr
new_tablet = HotColdTable::Create(table_info.table_name, relation); + std::shared_ptr
new_tablet = Table::Create(table_info.table_name, relation); TableIDTablet id_key = {table_id, tablet_id}; id_to_table_map_[id_key] = new_tablet; diff --git a/src/table_store/table/table_store_test.cc b/src/table_store/table/table_store_test.cc index 8e43d3f0fdd..8bd3ca761cc 100644 --- a/src/table_store/table/table_store_test.cc +++ b/src/table_store/table/table_store_test.cc @@ -42,8 +42,8 @@ class TableStoreTest : public ::testing::Test { rel2 = schema::Relation({types::DataType::INT64, types::DataType::FLOAT64, types::DataType::INT64}, {"table2col1", "table2col2", "table2col3"}); - table1 = HotColdTable::Create("test_table1", rel1); - table2 = HotColdTable::Create("test_table2", rel2); + table1 = Table::Create("test_table1", rel1); + table2 = Table::Create("test_table2", rel2); } std::unique_ptr MakeRel1ColumnWrapperBatch() { @@ -208,9 +208,9 @@ class TableStoreTabletsTest : public TableStoreTest { protected: void SetUp() override { TableStoreTest::SetUp(); - tablet1_1 = HotColdTable::Create("test_table1", rel1); - tablet1_2 = HotColdTable::Create("test_table1", rel1); - tablet2_1 = HotColdTable::Create("test_table2", rel2); + tablet1_1 = Table::Create("test_table1", rel1); + tablet1_2 = Table::Create("test_table1", rel1); + tablet2_1 = Table::Create("test_table2", rel2); } std::shared_ptr
tablet1_1; diff --git a/src/table_store/table/table_test.cc b/src/table_store/table/table_test.cc index 6a2617098e6..10d1eed6b44 100644 --- a/src/table_store/table/table_test.cc +++ b/src/table_store/table/table_test.cc @@ -37,28 +37,7 @@ namespace { // TOOD(zasgar): deduplicate this with exec/test_utils. std::shared_ptr
TestTable() { schema::Relation rel({types::DataType::FLOAT64, types::DataType::INT64}, {"col1", "col2"}); - auto table = HotColdTable::Create("test_table", rel); - - auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); - std::vector col1_in1 = {0.5, 1.2, 5.3}; - std::vector col2_in1 = {1, 2, 3}; - PX_CHECK_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - PX_CHECK_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - PX_CHECK_OK(table->WriteRowBatch(rb1)); - - auto rb2 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 2); - std::vector col1_in2 = {0.1, 5.1}; - std::vector col2_in2 = {5, 6}; - PX_CHECK_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); - PX_CHECK_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); - PX_CHECK_OK(table->WriteRowBatch(rb2)); - - return table; -} - -std::shared_ptr
HotOnlyTestTable() { - schema::Relation rel({types::DataType::FLOAT64, types::DataType::INT64}, {"col1", "col2"}); - auto table = HotOnlyTable::Create("test_table", rel); + auto table = Table::Create("test_table", rel); auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); std::vector col1_in1 = {0.5, 1.2, 5.3}; @@ -82,42 +61,7 @@ std::shared_ptr
HotOnlyTestTable() { TEST(TableTest, basic_test) { schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); - Table& table = *table_ptr; - - auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); - std::vector col1_in1 = {true, false, true}; - std::vector col2_in1 = {1, 2, 3}; - EXPECT_OK(rb1.AddColumn(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - EXPECT_OK(rb1.AddColumn(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - EXPECT_OK(table.WriteRowBatch(rb1)); - - auto rb2 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 2); - std::vector col1_in2 = {false, false}; - std::vector col2_in2 = {5, 6}; - EXPECT_OK(rb2.AddColumn(types::ToArrow(col1_in2, arrow::default_memory_pool()))); - EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); - EXPECT_OK(table.WriteRowBatch(rb2)); - - Cursor cursor(table_ptr.get()); - - auto actual_rb1 = cursor.GetNextRowBatch(std::vector({0, 1})).ConsumeValueOrDie(); - EXPECT_TRUE( - actual_rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - EXPECT_TRUE( - actual_rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - - auto actual_rb2 = cursor.GetNextRowBatch(std::vector({0, 1})).ConsumeValueOrDie(); - EXPECT_TRUE( - actual_rb2->ColumnAt(0)->Equals(types::ToArrow(col1_in2, arrow::default_memory_pool()))); - EXPECT_TRUE( - actual_rb2->ColumnAt(1)->Equals(types::ToArrow(col2_in2, arrow::default_memory_pool()))); -} - -TEST(TableTest, HotOnlyTable_basic_test) { - schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - - std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); + std::shared_ptr
table_ptr = Table::Create("test_table", rel); Table& table = *table_ptr; auto rb1 = schema::RowBatch(schema::RowDescriptor(rel.col_types()), 3); @@ -134,7 +78,7 @@ TEST(TableTest, HotOnlyTable_basic_test) { EXPECT_OK(rb2.AddColumn(types::ToArrow(col2_in2, arrow::default_memory_pool()))); EXPECT_OK(table.WriteRowBatch(rb2)); - Cursor cursor(table_ptr.get()); + Table::Cursor cursor(table_ptr.get()); auto actual_rb1 = cursor.GetNextRowBatch(std::vector({0, 1})).ConsumeValueOrDie(); EXPECT_TRUE( @@ -153,60 +97,7 @@ TEST(TableTest, bytes_test) { auto rd = schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); schema::Relation rel(rd.types(), {"col1", "col2"}); - std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); - Table& table = *table_ptr; - - schema::RowBatch rb1(rd, 3); - std::vector col1_rb1 = {4, 5, 10}; - std::vector col2_rb1 = {"hello", "abc", "defg"}; - auto col1_rb1_arrow = types::ToArrow(col1_rb1, arrow::default_memory_pool()); - auto col2_rb1_arrow = types::ToArrow(col2_rb1, arrow::default_memory_pool()); - EXPECT_OK(rb1.AddColumn(col1_rb1_arrow)); - EXPECT_OK(rb1.AddColumn(col2_rb1_arrow)); - int64_t rb1_size = 3 * sizeof(int64_t) + 12 * sizeof(char) + 3 * sizeof(uint32_t); - - EXPECT_OK(table.WriteRowBatch(rb1)); - EXPECT_EQ(table.GetTableStats().bytes, rb1_size); - - schema::RowBatch rb2(rd, 2); - std::vector col1_rb2 = {4, 5}; - std::vector col2_rb2 = {"a", "bc"}; - auto col1_rb2_arrow = types::ToArrow(col1_rb2, arrow::default_memory_pool()); - auto col2_rb2_arrow = types::ToArrow(col2_rb2, arrow::default_memory_pool()); - EXPECT_OK(rb2.AddColumn(col1_rb2_arrow)); - EXPECT_OK(rb2.AddColumn(col2_rb2_arrow)); - int64_t rb2_size = 2 * sizeof(int64_t) + 3 * sizeof(char) + 2 * sizeof(uint32_t); - - EXPECT_OK(table.WriteRowBatch(rb2)); - EXPECT_EQ(table.GetTableStats().bytes, rb1_size + rb2_size); - - std::vector time_hot_col1 = {1, 5, 3}; - std::vector time_hot_col2 = {"test", "abc", "de"}; - auto wrapper_batch_1 = std::make_unique(); - auto col_wrapper_1 = std::make_shared(3); - col_wrapper_1->Clear(); - for (const auto& num : time_hot_col1) { - col_wrapper_1->Append(num); - } - auto col_wrapper_2 = std::make_shared(3); - col_wrapper_2->Clear(); - for (const auto& num : time_hot_col2) { - col_wrapper_2->Append(num); - } - wrapper_batch_1->push_back(col_wrapper_1); - wrapper_batch_1->push_back(col_wrapper_2); - int64_t rb3_size = 3 * sizeof(int64_t) + 9 * sizeof(char) + 3 * sizeof(uint32_t); - - EXPECT_OK(table.TransferRecordBatch(std::move(wrapper_batch_1))); - - EXPECT_EQ(table.GetTableStats().bytes, rb1_size + rb2_size + rb3_size); -} - -TEST(TableTest, HotOnlyTable_bytes_test) { - auto rd = 
schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); - schema::Relation rel(rd.types(), {"col1", "col2"}); - - std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); + std::shared_ptr
table_ptr = Table::Create("test_table", rel); Table& table = *table_ptr; schema::RowBatch rb1(rd, 3); @@ -297,7 +188,7 @@ TEST(TableTest, bytes_test_w_compaction) { // Make minimum batch size rb1_size + rb2_size so that compaction causes 2 of the 3 batches to // be compacted into cold. std::shared_ptr
table_ptr = - std::make_shared("test_table", rel, 128 * 1024, rb1_size + rb2_size); + std::make_shared
("test_table", rel, 128 * 1024, rb1_size + rb2_size); Table& table = *table_ptr; EXPECT_OK(table.WriteRowBatch(rb1)); @@ -317,7 +208,7 @@ TEST(TableTest, expiry_test) { auto rd = schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); schema::Relation rel(rd.types(), {"col1", "col2"}); - HotColdTable table("test_table", rel, 80); + Table table("test_table", rel, 80); schema::RowBatch rb1(rd, 3); std::vector col1_rb1 = {4, 5, 10}; @@ -463,7 +354,7 @@ TEST(TableTest, expiry_test_w_compaction) { wrapper_batch_1_2->push_back(col_wrapper_2_2); int64_t rb5_size = 5 * sizeof(int64_t) + 20 * sizeof(char) + 5 * sizeof(uint32_t); - HotColdTable table("test_table", rel, 80, 40); + Table table("test_table", rel, 80, 40); EXPECT_OK(table.WriteRowBatch(rb1)); EXPECT_EQ(table.GetTableStats().bytes, rb1_size); @@ -485,7 +376,7 @@ TEST(TableTest, batch_size_too_big) { auto rd = schema::RowDescriptor({types::DataType::INT64, types::DataType::STRING}); schema::Relation rel(rd.types(), {"col1", "col2"}); - HotColdTable table("test_table", rel, 10); + Table table("test_table", rel, 10); schema::RowBatch rb1(rd, 3); std::vector col1_rb1 = {4, 5, 10}; @@ -503,7 +394,7 @@ TEST(TableTest, write_row_batch) { auto rd = schema::RowDescriptor({types::DataType::BOOLEAN, types::DataType::INT64}); schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); + std::shared_ptr
table_ptr = Table::Create("test_table", rel); Table& table = *table_ptr; schema::RowBatch rb1(rd, 2); @@ -516,32 +407,7 @@ TEST(TableTest, write_row_batch) { EXPECT_OK(table.WriteRowBatch(rb1)); - Cursor cursor(table_ptr.get()); - auto rb_or_s = cursor.GetNextRowBatch({0, 1}); - ASSERT_OK(rb_or_s); - auto actual_rb = rb_or_s.ConsumeValueOrDie(); - EXPECT_TRUE(actual_rb->ColumnAt(0)->Equals(col1_rb1_arrow)); - EXPECT_TRUE(actual_rb->ColumnAt(1)->Equals(col2_rb1_arrow)); -} - -TEST(TableTest, HotOnlyTable_write_row_batch) { - auto rd = schema::RowDescriptor({types::DataType::BOOLEAN, types::DataType::INT64}); - schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - - std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); - Table& table = *table_ptr; - - schema::RowBatch rb1(rd, 2); - std::vector col1_rb1 = {true, false}; - std::vector col2_rb1 = {1, 2}; - auto col1_rb1_arrow = types::ToArrow(col1_rb1, arrow::default_memory_pool()); - auto col2_rb1_arrow = types::ToArrow(col2_rb1, arrow::default_memory_pool()); - EXPECT_OK(rb1.AddColumn(col1_rb1_arrow)); - EXPECT_OK(rb1.AddColumn(col2_rb1_arrow)); - - EXPECT_OK(table.WriteRowBatch(rb1)); - - Cursor cursor(table_ptr.get()); + Table::Cursor cursor(table_ptr.get()); auto rb_or_s = cursor.GetNextRowBatch({0, 1}); ASSERT_OK(rb_or_s); auto actual_rb = rb_or_s.ConsumeValueOrDie(); @@ -552,48 +418,7 @@ TEST(TableTest, HotOnlyTable_write_row_batch) { TEST(TableTest, hot_batches_test) { schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - std::shared_ptr
table_ptr = HotColdTable::Create("table_name", rel); - Table& table = *table_ptr; - - std::vector col1_in1 = {true, false, true}; - auto col1_in1_wrapper = - types::ColumnWrapper::FromArrow(types::ToArrow(col1_in1, arrow::default_memory_pool())); - std::vector col1_in2 = {false, false}; - auto col1_in2_wrapper = - types::ColumnWrapper::FromArrow(types::ToArrow(col1_in2, arrow::default_memory_pool())); - - std::vector col2_in1 = {1, 2, 3}; - auto col2_in1_wrapper = - types::ColumnWrapper::FromArrow(types::ToArrow(col2_in1, arrow::default_memory_pool())); - std::vector col2_in2 = {5, 6}; - auto col2_in2_wrapper = - types::ColumnWrapper::FromArrow(types::ToArrow(col2_in2, arrow::default_memory_pool())); - - auto rb_wrapper_1 = std::make_unique(); - rb_wrapper_1->push_back(col1_in1_wrapper); - rb_wrapper_1->push_back(col2_in1_wrapper); - EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_1))); - - auto rb_wrapper_2 = std::make_unique(); - rb_wrapper_2->push_back(col1_in2_wrapper); - rb_wrapper_2->push_back(col2_in2_wrapper); - EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - - Cursor cursor(table_ptr.get()); - auto rb1 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); - EXPECT_TRUE(rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); - EXPECT_TRUE(rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); - - auto rb2 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); - ASSERT_NE(rb2, nullptr); - EXPECT_TRUE(rb2->ColumnAt(0)->Equals(types::ToArrow(col1_in2, arrow::default_memory_pool()))); - EXPECT_TRUE(rb2->ColumnAt(1)->Equals(types::ToArrow(col2_in2, arrow::default_memory_pool()))); -} - -TEST(TableTest, HotOnlyTable_hot_batches_test) { - schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - - std::shared_ptr
table_ptr = HotOnlyTable::Create("table_name", rel); + std::shared_ptr
table_ptr = Table::Create("table_name", rel); Table& table = *table_ptr; std::vector col1_in1 = {true, false, true}; @@ -620,7 +445,7 @@ TEST(TableTest, HotOnlyTable_hot_batches_test) { rb_wrapper_2->push_back(col2_in2_wrapper); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - Cursor cursor(table_ptr.get()); + Table::Cursor cursor(table_ptr.get()); auto rb1 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); EXPECT_TRUE(rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); EXPECT_TRUE(rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); @@ -657,12 +482,12 @@ TEST(TableTest, hot_batches_w_compaction_test) { rb_wrapper_2->push_back(col1_in2_wrapper); rb_wrapper_2->push_back(col2_in2_wrapper); - HotColdTable table("test_table", rel, 128 * 1024, rb1_size); + Table table("test_table", rel, 128 * 1024, rb1_size); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_1))); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - Cursor cursor(&table); + Table::Cursor cursor(&table); auto rb1 = cursor.GetNextRowBatch({0, 1}).ConsumeValueOrDie(); EXPECT_TRUE(rb1->ColumnAt(0)->Equals(types::ToArrow(col1_in1, arrow::default_memory_pool()))); EXPECT_TRUE(rb1->ColumnAt(1)->Equals(types::ToArrow(col2_in1, arrow::default_memory_pool()))); @@ -677,7 +502,7 @@ TEST(TableTest, hot_batches_w_compaction_test) { TEST(TableTest, find_rowid_from_time_first_greater_than_or_equal) { schema::Relation rel(std::vector({types::DataType::TIME64NS}), std::vector({"time_"})); - std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); + std::shared_ptr
table_ptr = Table::Create("test_table", rel); Table& table = *table_ptr; std::vector time_batch_1 = {2, 3, 4, 6}; @@ -754,7 +579,7 @@ TEST(TableTest, find_rowid_from_time_first_greater_than_or_equal_with_compaction schema::Relation rel(std::vector({types::DataType::TIME64NS}), std::vector({"time_"})); int64_t compaction_size = 4 * sizeof(int64_t); - HotColdTable table("test_table", rel, 128 * 1024, compaction_size); + Table table("test_table", rel, 128 * 1024, compaction_size); std::vector time_batch_1 = {2, 3, 4, 6}; std::vector time_batch_2 = {8, 8, 8}; @@ -892,96 +717,11 @@ TEST(TableTest, ToProto) { EXPECT_TRUE(differ.Compare(expected_proto, table_proto)); } -// TODO(ddelnano): Not sure if this matters since I believe StopSpec::Inifinite will hit -// an error for this ToProto test. -TEST(TableTest, DISABLED_HotOnlyTable_ToProto) { - auto table = HotOnlyTestTable(); - table_store::schemapb::Table table_proto; - EXPECT_OK(table->ToProto(&table_proto)); - - std::string expected = R"( - relation { - columns { - column_name: "col1" - column_type: FLOAT64 - column_semantic_type: ST_NONE - } - columns { - column_name: "col2" - column_type: INT64 - column_semantic_type: ST_NONE - } - } - row_batches { - cols { - float64_data { - data: 0.5 - data: 1.2 - data: 5.3 - } - } - cols { - int64_data { - data: 1 - data: 2 - data: 3 - } - } - eow: false - eos: false - num_rows: 3 - } - row_batches { - cols { - float64_data { - data: 0.1 - data: 5.1 - } - } - cols { - int64_data { - data: 5 - data: 6 - } - } - eow: true - eos: true - num_rows: 2 - })"; - - google::protobuf::util::MessageDifferencer differ; - table_store::schemapb::Table expected_proto; - ASSERT_TRUE(google::protobuf::TextFormat::MergeFromString(expected, &expected_proto)); - EXPECT_TRUE(differ.Compare(expected_proto, table_proto)); -} - TEST(TableTest, transfer_empty_record_batch_test) { schema::Relation rel({types::DataType::INT64}, {"col1"}); schema::RowDescriptor rd({types::DataType::INT64}); - std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); - Table& table = *table_ptr; - - // ColumnWrapper with no columns should not be added to row batches. - auto wrapper_batch_1 = std::make_unique(); - EXPECT_OK(table.TransferRecordBatch(std::move(wrapper_batch_1))); - - EXPECT_EQ(table.GetTableStats().batches_added, 0); - - // Column wrapper with empty columns should not be added to row batches. - auto wrapper_batch_2 = std::make_unique(); - auto col_wrapper_2 = std::make_shared(0); - wrapper_batch_2->push_back(col_wrapper_2); - EXPECT_OK(table.TransferRecordBatch(std::move(wrapper_batch_2))); - - EXPECT_EQ(table.GetTableStats().batches_added, 0); -} - -TEST(TableTest, HotOnlyTable_transfer_empty_record_batch_test) { - schema::Relation rel({types::DataType::INT64}, {"col1"}); - schema::RowDescriptor rd({types::DataType::INT64}); - - std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); + std::shared_ptr
table_ptr = Table::Create("test_table", rel); Table& table = *table_ptr; // ColumnWrapper with no columns should not be added to row batches. @@ -1003,22 +743,7 @@ TEST(TableTest, write_zero_row_row_batch) { schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); schema::RowDescriptor rd({types::DataType::BOOLEAN, types::DataType::INT64}); - std::shared_ptr
table_ptr = HotColdTable::Create("test_table", rel); - - auto result = schema::RowBatch::WithZeroRows(rd, /*eow*/ false, /*eos*/ false); - ASSERT_OK(result); - auto rb_ptr = result.ConsumeValueOrDie(); - - EXPECT_OK(table_ptr->WriteRowBatch(*rb_ptr)); - // Row batch with 0 rows won't be written. - EXPECT_EQ(table_ptr->GetTableStats().batches_added, 0); -} - -TEST(TableTest, HotOnlyTable_write_zero_row_row_batch) { - schema::Relation rel({types::DataType::BOOLEAN, types::DataType::INT64}, {"col1", "col2"}); - schema::RowDescriptor rd({types::DataType::BOOLEAN, types::DataType::INT64}); - - std::shared_ptr
table_ptr = HotOnlyTable::Create("test_table", rel); + std::shared_ptr
table_ptr = Table::Create("test_table", rel); auto result = schema::RowBatch::WithZeroRows(rd, /*eow*/ false, /*eos*/ false); ASSERT_OK(result); @@ -1046,7 +771,7 @@ TEST(TableTest, threaded) { schema::Relation rel({types::DataType::TIME64NS}, {"time_"}); schema::RowDescriptor rd({types::DataType::TIME64NS}); std::shared_ptr
table_ptr = - std::make_shared("test_table", rel, 8 * 1024 * 1024, 5 * 1024); + std::make_shared
("test_table", rel, 8 * 1024 * 1024, 5 * 1024); int64_t max_time_counter = 1024 * 1024; @@ -1061,8 +786,8 @@ TEST(TableTest, threaded) { }); // Create the cursor before the write thread starts, to ensure that we get every row of the table. - Cursor cursor(table_ptr.get(), Cursor::StartSpec{}, - Cursor::StopSpec{Cursor::StopSpec::StopType::Infinite}); + Table::Cursor cursor(table_ptr.get(), Table::Cursor::StartSpec{}, + Table::Cursor::StopSpec{Table::Cursor::StopSpec::StopType::Infinite}); std::thread writer_thread([table_ptr, done, max_time_counter]() { std::default_random_engine gen; @@ -1119,7 +844,7 @@ TEST(TableTest, threaded) { } // Now that the writer is finished move the stop of the cursor to the current end of the table. - cursor.UpdateStopSpec(Cursor::StopSpec{Cursor::StopSpec::CurrentEndOfTable}); + cursor.UpdateStopSpec(Table::Cursor::StopSpec{Table::Cursor::StopSpec::CurrentEndOfTable}); // Once the writer is finished, we loop over the remaining data in the table. while (time_counter < max_time_counter && !cursor.Done()) { @@ -1147,7 +872,7 @@ TEST(TableTest, NextBatch_generation_bug) { schema::Relation rel(rd.types(), {"col1", "col2"}); int64_t rb1_size = 3 * sizeof(int64_t) + 12 * sizeof(char) + 3 * sizeof(uint32_t); - HotColdTable table("test_table", rel, rb1_size, rb1_size); + Table table("test_table", rel, rb1_size, rb1_size); schema::RowBatch rb1(rd, 3); std::vector col1_rb1 = {4, 5, 10}; @@ -1160,7 +885,7 @@ TEST(TableTest, NextBatch_generation_bug) { EXPECT_OK(table.WriteRowBatch(rb1)); EXPECT_OK(table.CompactHotToCold(arrow::default_memory_pool())); - Cursor cursor(&table, Cursor::StartSpec{}, Cursor::StopSpec{}); + Table::Cursor cursor(&table, Table::Cursor::StartSpec{}, Table::Cursor::StopSpec{}); // Force cold expiration. EXPECT_OK(table.WriteRowBatch(rb1)); // GetNextRowBatch should return invalidargument since the batch was expired. 
@@ -1194,12 +919,12 @@ TEST(TableTest, GetNextRowBatch_after_expiry) { rb_wrapper_2->push_back(col2_in2_wrapper); int64_t rb2_size = 2 * sizeof(bool) + 2 * sizeof(int64_t); - HotColdTable table("test_table", rel, rb1_size + rb2_size, rb1_size); + Table table("test_table", rel, rb1_size + rb2_size, rb1_size); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_1))); EXPECT_OK(table.TransferRecordBatch(std::move(rb_wrapper_2))); - Cursor cursor(&table); + Table::Cursor cursor(&table); // This write will expire the first batch. auto rb_wrapper_1_copy = std::make_unique(); @@ -1217,8 +942,8 @@ TEST(TableTest, GetNextRowBatch_after_expiry) { struct CursorTestCase { std::string name; std::vector> initial_time_batches; - Cursor::StartSpec start_spec; - Cursor::StopSpec stop_spec; + Table::Cursor::StartSpec start_spec; + Table::Cursor::StopSpec stop_spec; struct Action { enum ActionType { ExpectBatch, @@ -1240,14 +965,14 @@ class CursorTableTest : public ::testing::Test, rel_ = std::make_unique(std::vector{types::TIME64NS}, std::vector{"time_"}); - table_ptr_ = HotColdTable::Create("test_table", *rel_); + table_ptr_ = Table::Create("test_table", *rel_); for (const auto& batch : test_case_.initial_time_batches) { WriteBatch(batch); } - cursor_ = - std::make_unique(table_ptr_.get(), test_case_.start_spec, test_case_.stop_spec); + cursor_ = std::make_unique(table_ptr_.get(), test_case_.start_spec, + test_case_.stop_spec); } void WriteBatch(const std::vector& times) { @@ -1277,7 +1002,7 @@ class CursorTableTest : public ::testing::Test, CursorTestCase test_case_; std::unique_ptr rel_; std::shared_ptr
table_ptr_; - std::unique_ptr cursor_; + std::unique_ptr cursor_; }; TEST_P(CursorTableTest, cursor_test) { @@ -1296,8 +1021,8 @@ TEST_P(CursorTableTest, cursor_test) { } } -using StartType = Cursor::StartSpec::StartType; -using StopType = Cursor::StopSpec::StopType; +using StartType = Table::Cursor::StartSpec::StartType; +using StopType = Table::Cursor::StopSpec::StopType; INSTANTIATE_TEST_SUITE_P(CursorTableTestSuite, CursorTableTest, ::testing::ValuesIn(std::vector{ diff --git a/src/table_store/table/tablets_group.cc b/src/table_store/table/tablets_group.cc index 9c35d0a9bbb..adf6e0f961a 100644 --- a/src/table_store/table/tablets_group.cc +++ b/src/table_store/table/tablets_group.cc @@ -24,7 +24,7 @@ namespace table_store { void TabletsGroup::CreateTablet(const types::TabletID& tablet_id) { LOG_IF(DFATAL, HasTablet(tablet_id)) << absl::Substitute("Tablet with id $0 already exists in Table.", tablet_id); - tablet_id_to_tablet_map_[tablet_id] = HotColdTable::Create(tablet_id, relation_); + tablet_id_to_tablet_map_[tablet_id] = Table::Create(tablet_id, relation_); } void TabletsGroup::AddTablet(const types::TabletID& tablet_id, std::shared_ptr
tablet) { diff --git a/src/table_store/table/tablets_group_test.cc b/src/table_store/table/tablets_group_test.cc index 6d34aac5637..a9ec26ba7da 100644 --- a/src/table_store/table/tablets_group_test.cc +++ b/src/table_store/table/tablets_group_test.cc @@ -40,8 +40,8 @@ class TabletsGroupTest : public ::testing::Test { rel2 = schema::Relation({types::DataType::INT64, types::DataType::FLOAT64, types::DataType::INT64}, {"table2col1", "table2col2", "table2col3"}); - tablet1 = HotColdTable::Create("test_table1", rel1); - tablet2 = HotColdTable::Create("test_table2", rel2); + tablet1 = Table::Create("test_table1", rel1); + tablet2 = Table::Create("test_table2", rel2); } std::shared_ptr
tablet1; diff --git a/src/table_store/test_utils.h b/src/table_store/test_utils.h index 2f4bc35ab4b..ae524c612e0 100644 --- a/src/table_store/test_utils.h +++ b/src/table_store/test_utils.h @@ -61,7 +61,7 @@ inline StatusOr> CreateTable( const datagen::DistributionParams* dist_vars, const datagen::DistributionParams* len_vars) { schema::RowDescriptor rd(types); - auto table = HotColdTable::Create("test_table", table_store::schema::Relation(types, col_names)); + auto table = Table::Create("test_table", table_store::schema::Relation(types, col_names)); for (int batch_idx = 0; batch_idx < num_batches; batch_idx++) { auto rb = schema::RowBatch(schema::RowDescriptor(types), rb_size); diff --git a/src/ui/src/utils/pxl.ts b/src/ui/src/utils/pxl.ts index cc07e2c06bd..ba44b9e4ac5 100644 --- a/src/ui/src/utils/pxl.ts +++ b/src/ui/src/utils/pxl.ts @@ -20,8 +20,6 @@ const pxlMutations = [ 'from pxtrace', 'import pxtrace', - 'from pxlog', - 'import pxlog', 'import pxconfig', ]; diff --git a/src/vizier/funcs/context/vizier_context.h b/src/vizier/funcs/context/vizier_context.h index 6820ac738f3..a431c4cdd12 100644 --- a/src/vizier/funcs/context/vizier_context.h +++ b/src/vizier/funcs/context/vizier_context.h @@ -42,19 +42,17 @@ class VizierFuncFactoryContext : public NotCopyable { public: using MDSStub = services::metadata::MetadataService::Stub; using MDTPStub = services::metadata::MetadataTracepointService::Stub; - using MDFSStub = services::metadata::MetadataFileSourceService::Stub; VizierFuncFactoryContext() = default; VizierFuncFactoryContext( const agent::BaseManager* agent_manager, const std::shared_ptr& mds_stub, - const std::shared_ptr& mdtp_stub, const std::shared_ptr& mdfs_stub, + const std::shared_ptr& mdtp_stub, const std::shared_ptr& cronscript_stub, std::shared_ptr<::px::table_store::TableStore> table_store, std::function add_grpc_auth) : agent_manager_(agent_manager), mds_stub_(mds_stub), mdtp_stub_(mdtp_stub), - mdfs_stub_(mdfs_stub), 
cronscript_stub_(cronscript_stub), table_store_(table_store), add_auth_to_grpc_context_func_(add_grpc_auth) {} @@ -74,10 +72,6 @@ class VizierFuncFactoryContext : public NotCopyable { CHECK(mdtp_stub_ != nullptr); return mdtp_stub_; } - std::shared_ptr mdfs_stub() const { - CHECK(mdfs_stub_ != nullptr); - return mdfs_stub_; - } std::shared_ptr cronscript_stub() const { CHECK(cronscript_stub_ != nullptr); return cronscript_stub_; @@ -94,7 +88,6 @@ class VizierFuncFactoryContext : public NotCopyable { const agent::BaseManager* agent_manager_ = nullptr; std::shared_ptr mds_stub_ = nullptr; std::shared_ptr mdtp_stub_ = nullptr; - std::shared_ptr mdfs_stub_ = nullptr; std::shared_ptr cronscript_stub_ = nullptr; std::shared_ptr<::px::table_store::TableStore> table_store_ = nullptr; std::function add_auth_to_grpc_context_func_; diff --git a/src/vizier/funcs/md_udtfs/md_udtfs.cc b/src/vizier/funcs/md_udtfs/md_udtfs.cc index 7629b75cb61..ec6f8926e80 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs.cc +++ b/src/vizier/funcs/md_udtfs/md_udtfs.cc @@ -55,8 +55,6 @@ void RegisterFuncsOrDie(const VizierFuncFactoryContext& ctx, carnot::udf::Regist registry->RegisterFactoryOrDie>( "GetTracepointStatus", ctx); - registry->RegisterFactoryOrDie>( - "GetFileSourceStatus", ctx); registry ->RegisterFactoryOrDie>( "GetCronScriptHistory", ctx); diff --git a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h index 1ae7b9900cb..1bc99b20b5c 100644 --- a/src/vizier/funcs/md_udtfs/md_udtfs_impl.h +++ b/src/vizier/funcs/md_udtfs/md_udtfs_impl.h @@ -76,20 +76,6 @@ class UDTFWithMDTPFactory : public carnot::udf::UDTFFactory { const VizierFuncFactoryContext& ctx_; }; -template -class UDTFWithMDFSFactory : public carnot::udf::UDTFFactory { - public: - UDTFWithMDFSFactory() = delete; - explicit UDTFWithMDFSFactory(const VizierFuncFactoryContext& ctx) : ctx_(ctx) {} - - std::unique_ptr Make() override { - return std::make_unique(ctx_.mdfs_stub(), 
ctx_.add_auth_to_grpc_context_func()); - } - - private: - const VizierFuncFactoryContext& ctx_; -}; - template class UDTFWithCronscriptFactory : public carnot::udf::UDTFFactory { public: @@ -151,9 +137,7 @@ class GetTables final : public carnot::udf::UDTF { return MakeArray(ColInfo("table_name", types::DataType::STRING, types::PatternType::GENERAL, "The table name"), ColInfo("table_desc", types::DataType::STRING, types::PatternType::GENERAL, - "Description of the table"), - ColInfo("table_metadata", types::DataType::STRING, types::PatternType::GENERAL, - "Metadata of the table in JSON")); + "Description of the table")); } Status Init(FunctionContext*) { @@ -168,7 +152,7 @@ class GetTables final : public carnot::udf::UDTF { } for (const auto& [table_name, rel] : resp.schema().relation_map()) { - table_info_.emplace_back(table_name, rel.desc(), rel.mutation_id()); + table_info_.emplace_back(table_name, rel.desc()); } return Status::OK(); } @@ -180,7 +164,6 @@ class GetTables final : public carnot::udf::UDTF { const auto& r = table_info_[idx_]; rw->Append(r.table_name); rw->Append(r.table_desc); - rw->Append(r.table_metadata); idx_++; return idx_ < static_cast(table_info_.size()); @@ -188,12 +171,10 @@ class GetTables final : public carnot::udf::UDTF { private: struct TableInfo { - TableInfo(const std::string& table_name, const std::string& table_desc, - const std::string& table_metadata) - : table_name(table_name), table_desc(table_desc), table_metadata(table_metadata) {} + TableInfo(const std::string& table_name, const std::string& table_desc) + : table_name(table_name), table_desc(table_desc) {} std::string table_name; std::string table_desc; - std::string table_metadata; }; int idx_ = 0; @@ -900,8 +881,6 @@ class GetTracepointStatus final : public carnot::udf::UDTF static constexpr auto OutputRelation() { return MakeArray(ColInfo("tracepoint_id", types::DataType::UINT128, types::PatternType::GENERAL, "The id of the tracepoint"), - ColInfo("tracepoint_id_str", 
types::DataType::STRING, types::PatternType::GENERAL, - "The string id of the tracepoint"), ColInfo("name", types::DataType::STRING, types::PatternType::GENERAL, "The name of the tracepoint"), ColInfo("state", types::DataType::STRING, types::PatternType::GENERAL, @@ -980,7 +959,6 @@ class GetTracepointStatus final : public carnot::udf::UDTF tables.Accept(tables_writer); rw->Append(absl::MakeUint128(u.ab, u.cd)); - rw->Append(u.str()); rw->Append(tracepoint_info.name()); rw->Append(state); @@ -1007,130 +985,6 @@ class GetTracepointStatus final : public carnot::udf::UDTF std::function add_context_authentication_func_; }; -/** - * This UDTF fetches information about tracepoints from MDS. - */ -class GetFileSourceStatus final : public carnot::udf::UDTF { - public: - using MDFSStub = vizier::services::metadata::MetadataFileSourceService::Stub; - using FileSourceResponse = vizier::services::metadata::GetFileSourceInfoResponse; - GetFileSourceStatus() = delete; - explicit GetFileSourceStatus(std::shared_ptr stub, - std::function add_context_authentication) - : idx_(0), stub_(stub), add_context_authentication_func_(add_context_authentication) {} - - static constexpr auto Executor() { return carnot::udfspb::UDTFSourceExecutor::UDTF_ONE_KELVIN; } - - static constexpr auto OutputRelation() { - // TODO(ddelnano): Change the file_source_id column to a UINT128 once the pxl lookup from - // px/pipeline_flow_graph works. 
That script has a UINT128 stored as a string and needs to - // be joined with this column - return MakeArray(ColInfo("file_source_id", types::DataType::STRING, - types::PatternType::GENERAL, "The id of the file source"), - ColInfo("name", types::DataType::STRING, types::PatternType::GENERAL, - "The name of the file source"), - ColInfo("state", types::DataType::STRING, types::PatternType::GENERAL, - "The state of the file source"), - ColInfo("status", types::DataType::STRING, types::PatternType::GENERAL, - "The status message if not healthy"), - ColInfo("output_tables", types::DataType::STRING, types::PatternType::GENERAL, - "A list of tables output by the file source")); - // TODO(ddelnano): Add in the create time, and TTL in here after we add those attributes to the - // GetFileSourceInfo RPC call in MDS. - } - - Status Init(FunctionContext*) { - px::vizier::services::metadata::GetFileSourceInfoRequest req; - resp_ = std::make_unique(); - - grpc::ClientContext ctx; - add_context_authentication_func_(&ctx); - auto s = stub_->GetFileSourceInfo(&ctx, req, resp_.get()); - if (!s.ok()) { - return error::Internal("Failed to make RPC call to GetFileSourceStatus: $0", - s.error_message()); - } - return Status::OK(); - } - - bool NextRecord(FunctionContext*, RecordWriter* rw) { - if (resp_->file_sources_size() == 0) { - return false; - } - const auto& file_source_info = resp_->file_sources(idx_); - - auto u_or_s = ParseUUID(file_source_info.id()); - sole::uuid u; - if (u_or_s.ok()) { - u = u_or_s.ConsumeValueOrDie(); - } - - auto actual = file_source_info.state(); - auto expected = file_source_info.expected_state(); - std::string state; - - switch (actual) { - case statuspb::PENDING_STATE: { - state = "pending"; - break; - } - case statuspb::RUNNING_STATE: { - state = "running"; - break; - } - case statuspb::FAILED_STATE: { - state = "failed"; - break; - } - case statuspb::TERMINATED_STATE: { - if (actual != expected) { - state = "terminating"; - } else { - state = 
"terminated"; - } - break; - } - default: - state = "unknown"; - } - - rapidjson::Document tables; - tables.SetArray(); - for (const auto& table : file_source_info.schema_names()) { - tables.PushBack(internal::StringRef(table), tables.GetAllocator()); - } - - rapidjson::StringBuffer tables_sb; - rapidjson::Writer tables_writer(tables_sb); - tables.Accept(tables_writer); - - rw->Append(u.str()); - rw->Append(file_source_info.name()); - rw->Append(state); - - rapidjson::Document statuses; - statuses.SetArray(); - for (const auto& status : file_source_info.statuses()) { - statuses.PushBack(internal::StringRef(status.msg()), statuses.GetAllocator()); - } - rapidjson::StringBuffer statuses_sb; - rapidjson::Writer statuses_writer(statuses_sb); - statuses.Accept(statuses_writer); - rw->Append(statuses_sb.GetString()); - - rw->Append(tables_sb.GetString()); - - ++idx_; - return idx_ < resp_->file_sources_size(); - } - - private: - int idx_ = 0; - std::unique_ptr resp_; - std::shared_ptr stub_; - std::function add_context_authentication_func_; -}; - class GetCronScriptHistory final : public carnot::udf::UDTF { public: using CronScriptStoreStub = vizier::services::metadata::CronScriptStoreService::Stub; diff --git a/src/vizier/messages/messagespb/BUILD.bazel b/src/vizier/messages/messagespb/BUILD.bazel index 902b666b693..5be2739d0dc 100644 --- a/src/vizier/messages/messagespb/BUILD.bazel +++ b/src/vizier/messages/messagespb/BUILD.bazel @@ -24,7 +24,6 @@ pl_proto_library( "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", - "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/carnot/planpb:plan_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/bloomfilterpb:bloomfilter_pl_proto", @@ -45,7 +44,6 @@ pl_cc_proto_library( "//src/api/proto/uuidpb:uuid_pl_cc_proto", 
"//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", - "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/carnot/planpb:plan_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/bloomfilterpb:bloomfilter_pl_cc_proto", @@ -67,7 +65,6 @@ pl_go_proto_library( "//src/api/proto/uuidpb:uuid_pl_go_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", - "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/carnot/planpb:plan_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/bloomfilterpb:bloomfilter_pl_go_proto", diff --git a/src/vizier/messages/messagespb/messages.pb.go b/src/vizier/messages/messagespb/messages.pb.go index f2b36a62af1..02713a1d90b 100755 --- a/src/vizier/messages/messagespb/messages.pb.go +++ b/src/vizier/messages/messagespb/messages.pb.go @@ -13,7 +13,6 @@ import ( uuidpb "px.dev/pixie/src/api/proto/uuidpb" distributedpb "px.dev/pixie/src/carnot/planner/distributedpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" - ir "px.dev/pixie/src/carnot/planner/file_source/ir" planpb "px.dev/pixie/src/carnot/planpb" statuspb "px.dev/pixie/src/common/base/statuspb" metadatapb "px.dev/pixie/src/shared/k8s/metadatapb" @@ -45,7 +44,6 @@ type VizierMessage struct { // *VizierMessage_TracepointMessage // *VizierMessage_ConfigUpdateMessage // *VizierMessage_K8SMetadataMessage - // *VizierMessage_FileSourceMessage Msg isVizierMessage_Msg `protobuf_oneof:"msg"` } @@ -115,9 +113,6 @@ type VizierMessage_ConfigUpdateMessage struct { type VizierMessage_K8SMetadataMessage struct { K8SMetadataMessage *K8SMetadataMessage `protobuf:"bytes,12,opt,name=k8s_metadata_message,json=k8sMetadataMessage,proto3,oneof" json:"k8s_metadata_message,omitempty"` } -type VizierMessage_FileSourceMessage struct { - 
FileSourceMessage *FileSourceMessage `protobuf:"bytes,13,opt,name=file_source_message,json=fileSourceMessage,proto3,oneof" json:"file_source_message,omitempty"` -} func (*VizierMessage_RegisterAgentRequest) isVizierMessage_Msg() {} func (*VizierMessage_RegisterAgentResponse) isVizierMessage_Msg() {} @@ -128,7 +123,6 @@ func (*VizierMessage_ExecuteQueryRequest) isVizierMessage_Msg() {} func (*VizierMessage_TracepointMessage) isVizierMessage_Msg() {} func (*VizierMessage_ConfigUpdateMessage) isVizierMessage_Msg() {} func (*VizierMessage_K8SMetadataMessage) isVizierMessage_Msg() {} -func (*VizierMessage_FileSourceMessage) isVizierMessage_Msg() {} func (m *VizierMessage) GetMsg() isVizierMessage_Msg { if m != nil { @@ -200,13 +194,6 @@ func (m *VizierMessage) GetK8SMetadataMessage() *K8SMetadataMessage { return nil } -func (m *VizierMessage) GetFileSourceMessage() *FileSourceMessage { - if x, ok := m.GetMsg().(*VizierMessage_FileSourceMessage); ok { - return x.FileSourceMessage - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. 
func (*VizierMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -219,7 +206,6 @@ func (*VizierMessage) XXX_OneofWrappers() []interface{} { (*VizierMessage_TracepointMessage)(nil), (*VizierMessage_ConfigUpdateMessage)(nil), (*VizierMessage_K8SMetadataMessage)(nil), - (*VizierMessage_FileSourceMessage)(nil), } } @@ -321,104 +307,6 @@ func (*TracepointMessage) XXX_OneofWrappers() []interface{} { } } -type FileSourceMessage struct { - // Types that are valid to be assigned to Msg: - // *FileSourceMessage_FileSourceInfoUpdate - // *FileSourceMessage_RemoveFileSourceRequest - // *FileSourceMessage_RegisterFileSourceRequest - Msg isFileSourceMessage_Msg `protobuf_oneof:"msg"` -} - -func (m *FileSourceMessage) Reset() { *m = FileSourceMessage{} } -func (*FileSourceMessage) ProtoMessage() {} -func (*FileSourceMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{2} -} -func (m *FileSourceMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileSourceMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FileSourceMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FileSourceMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileSourceMessage.Merge(m, src) -} -func (m *FileSourceMessage) XXX_Size() int { - return m.Size() -} -func (m *FileSourceMessage) XXX_DiscardUnknown() { - xxx_messageInfo_FileSourceMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_FileSourceMessage proto.InternalMessageInfo - -type isFileSourceMessage_Msg interface { - isFileSourceMessage_Msg() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} - -type FileSourceMessage_FileSourceInfoUpdate struct { - FileSourceInfoUpdate *FileSourceInfoUpdate 
`protobuf:"bytes,1,opt,name=file_source_info_update,json=fileSourceInfoUpdate,proto3,oneof" json:"file_source_info_update,omitempty"` -} -type FileSourceMessage_RemoveFileSourceRequest struct { - RemoveFileSourceRequest *RemoveFileSourceRequest `protobuf:"bytes,2,opt,name=remove_file_source_request,json=removeFileSourceRequest,proto3,oneof" json:"remove_file_source_request,omitempty"` -} -type FileSourceMessage_RegisterFileSourceRequest struct { - RegisterFileSourceRequest *RegisterFileSourceRequest `protobuf:"bytes,3,opt,name=register_file_source_request,json=registerFileSourceRequest,proto3,oneof" json:"register_file_source_request,omitempty"` -} - -func (*FileSourceMessage_FileSourceInfoUpdate) isFileSourceMessage_Msg() {} -func (*FileSourceMessage_RemoveFileSourceRequest) isFileSourceMessage_Msg() {} -func (*FileSourceMessage_RegisterFileSourceRequest) isFileSourceMessage_Msg() {} - -func (m *FileSourceMessage) GetMsg() isFileSourceMessage_Msg { - if m != nil { - return m.Msg - } - return nil -} - -func (m *FileSourceMessage) GetFileSourceInfoUpdate() *FileSourceInfoUpdate { - if x, ok := m.GetMsg().(*FileSourceMessage_FileSourceInfoUpdate); ok { - return x.FileSourceInfoUpdate - } - return nil -} - -func (m *FileSourceMessage) GetRemoveFileSourceRequest() *RemoveFileSourceRequest { - if x, ok := m.GetMsg().(*FileSourceMessage_RemoveFileSourceRequest); ok { - return x.RemoveFileSourceRequest - } - return nil -} - -func (m *FileSourceMessage) GetRegisterFileSourceRequest() *RegisterFileSourceRequest { - if x, ok := m.GetMsg().(*FileSourceMessage_RegisterFileSourceRequest); ok { - return x.RegisterFileSourceRequest - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*FileSourceMessage) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*FileSourceMessage_FileSourceInfoUpdate)(nil), - (*FileSourceMessage_RemoveFileSourceRequest)(nil), - (*FileSourceMessage_RegisterFileSourceRequest)(nil), - } -} - type ConfigUpdateMessage struct { // Types that are valid to be assigned to Msg: // *ConfigUpdateMessage_ConfigUpdateRequest @@ -428,7 +316,7 @@ type ConfigUpdateMessage struct { func (m *ConfigUpdateMessage) Reset() { *m = ConfigUpdateMessage{} } func (*ConfigUpdateMessage) ProtoMessage() {} func (*ConfigUpdateMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{3} + return fileDescriptor_0046fd1b9991f89c, []int{2} } func (m *ConfigUpdateMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -502,7 +390,7 @@ type K8SMetadataMessage struct { func (m *K8SMetadataMessage) Reset() { *m = K8SMetadataMessage{} } func (*K8SMetadataMessage) ProtoMessage() {} func (*K8SMetadataMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{4} + return fileDescriptor_0046fd1b9991f89c, []int{3} } func (m *K8SMetadataMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -597,7 +485,7 @@ type RegisterAgentRequest struct { func (m *RegisterAgentRequest) Reset() { *m = RegisterAgentRequest{} } func (*RegisterAgentRequest) ProtoMessage() {} func (*RegisterAgentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{5} + return fileDescriptor_0046fd1b9991f89c, []int{4} } func (m *RegisterAgentRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -647,7 +535,7 @@ type RegisterAgentResponse struct { func (m *RegisterAgentResponse) Reset() { *m = RegisterAgentResponse{} } func (*RegisterAgentResponse) ProtoMessage() {} func (*RegisterAgentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{6} + return fileDescriptor_0046fd1b9991f89c, []int{5} } func (m 
*RegisterAgentResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -690,7 +578,7 @@ type AgentDataInfo struct { func (m *AgentDataInfo) Reset() { *m = AgentDataInfo{} } func (*AgentDataInfo) ProtoMessage() {} func (*AgentDataInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{7} + return fileDescriptor_0046fd1b9991f89c, []int{6} } func (m *AgentDataInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -737,7 +625,7 @@ type AgentUpdateInfo struct { func (m *AgentUpdateInfo) Reset() { *m = AgentUpdateInfo{} } func (*AgentUpdateInfo) ProtoMessage() {} func (*AgentUpdateInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{8} + return fileDescriptor_0046fd1b9991f89c, []int{7} } func (m *AgentUpdateInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -811,7 +699,7 @@ type Heartbeat struct { func (m *Heartbeat) Reset() { *m = Heartbeat{} } func (*Heartbeat) ProtoMessage() {} func (*Heartbeat) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{9} + return fileDescriptor_0046fd1b9991f89c, []int{8} } func (m *Heartbeat) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -876,7 +764,7 @@ type MetadataUpdateInfo struct { func (m *MetadataUpdateInfo) Reset() { *m = MetadataUpdateInfo{} } func (*MetadataUpdateInfo) ProtoMessage() {} func (*MetadataUpdateInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{10} + return fileDescriptor_0046fd1b9991f89c, []int{9} } func (m *MetadataUpdateInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -928,7 +816,7 @@ type HeartbeatAck struct { func (m *HeartbeatAck) Reset() { *m = HeartbeatAck{} } func (*HeartbeatAck) ProtoMessage() {} func (*HeartbeatAck) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{11} + return fileDescriptor_0046fd1b9991f89c, []int{10} } func (m *HeartbeatAck) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-985,7 +873,7 @@ type HeartbeatNack struct { func (m *HeartbeatNack) Reset() { *m = HeartbeatNack{} } func (*HeartbeatNack) ProtoMessage() {} func (*HeartbeatNack) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{12} + return fileDescriptor_0046fd1b9991f89c, []int{11} } func (m *HeartbeatNack) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1030,7 +918,7 @@ type ExecuteQueryRequest struct { func (m *ExecuteQueryRequest) Reset() { *m = ExecuteQueryRequest{} } func (*ExecuteQueryRequest) ProtoMessage() {} func (*ExecuteQueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{13} + return fileDescriptor_0046fd1b9991f89c, []int{12} } func (m *ExecuteQueryRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +976,7 @@ type RegisterTracepointRequest struct { func (m *RegisterTracepointRequest) Reset() { *m = RegisterTracepointRequest{} } func (*RegisterTracepointRequest) ProtoMessage() {} func (*RegisterTracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{14} + return fileDescriptor_0046fd1b9991f89c, []int{13} } func (m *RegisterTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1141,7 +1029,7 @@ type TracepointInfoUpdate struct { func (m *TracepointInfoUpdate) Reset() { *m = TracepointInfoUpdate{} } func (*TracepointInfoUpdate) ProtoMessage() {} func (*TracepointInfoUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{15} + return fileDescriptor_0046fd1b9991f89c, []int{14} } func (m *TracepointInfoUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1205,7 +1093,7 @@ type RemoveTracepointRequest struct { func (m *RemoveTracepointRequest) Reset() { *m = RemoveTracepointRequest{} } func (*RemoveTracepointRequest) ProtoMessage() {} func (*RemoveTracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{16} + return 
fileDescriptor_0046fd1b9991f89c, []int{15} } func (m *RemoveTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1241,167 +1129,6 @@ func (m *RemoveTracepointRequest) GetID() *uuidpb.UUID { return nil } -type RegisterFileSourceRequest struct { - FileSourceDeployment *ir.FileSourceDeployment `protobuf:"bytes,1,opt,name=file_source_deployment,json=fileSourceDeployment,proto3" json:"file_source_deployment,omitempty"` - ID *uuidpb.UUID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` -} - -func (m *RegisterFileSourceRequest) Reset() { *m = RegisterFileSourceRequest{} } -func (*RegisterFileSourceRequest) ProtoMessage() {} -func (*RegisterFileSourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{17} -} -func (m *RegisterFileSourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RegisterFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RegisterFileSourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RegisterFileSourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterFileSourceRequest.Merge(m, src) -} -func (m *RegisterFileSourceRequest) XXX_Size() int { - return m.Size() -} -func (m *RegisterFileSourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterFileSourceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RegisterFileSourceRequest proto.InternalMessageInfo - -func (m *RegisterFileSourceRequest) GetFileSourceDeployment() *ir.FileSourceDeployment { - if m != nil { - return m.FileSourceDeployment - } - return nil -} - -func (m *RegisterFileSourceRequest) GetID() *uuidpb.UUID { - if m != nil { - return m.ID - } - return nil -} - -type FileSourceInfoUpdate struct { - ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` 
- State statuspb.LifeCycleState `protobuf:"varint,2,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` - Status *statuspb.Status `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - AgentID *uuidpb.UUID `protobuf:"bytes,4,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` -} - -func (m *FileSourceInfoUpdate) Reset() { *m = FileSourceInfoUpdate{} } -func (*FileSourceInfoUpdate) ProtoMessage() {} -func (*FileSourceInfoUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{18} -} -func (m *FileSourceInfoUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileSourceInfoUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FileSourceInfoUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FileSourceInfoUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileSourceInfoUpdate.Merge(m, src) -} -func (m *FileSourceInfoUpdate) XXX_Size() int { - return m.Size() -} -func (m *FileSourceInfoUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_FileSourceInfoUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_FileSourceInfoUpdate proto.InternalMessageInfo - -func (m *FileSourceInfoUpdate) GetID() *uuidpb.UUID { - if m != nil { - return m.ID - } - return nil -} - -func (m *FileSourceInfoUpdate) GetState() statuspb.LifeCycleState { - if m != nil { - return m.State - } - return statuspb.UNKNOWN_STATE -} - -func (m *FileSourceInfoUpdate) GetStatus() *statuspb.Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *FileSourceInfoUpdate) GetAgentID() *uuidpb.UUID { - if m != nil { - return m.AgentID - } - return nil -} - -type RemoveFileSourceRequest struct { - ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (m 
*RemoveFileSourceRequest) Reset() { *m = RemoveFileSourceRequest{} } -func (*RemoveFileSourceRequest) ProtoMessage() {} -func (*RemoveFileSourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{19} -} -func (m *RemoveFileSourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemoveFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemoveFileSourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RemoveFileSourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveFileSourceRequest.Merge(m, src) -} -func (m *RemoveFileSourceRequest) XXX_Size() int { - return m.Size() -} -func (m *RemoveFileSourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveFileSourceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoveFileSourceRequest proto.InternalMessageInfo - -func (m *RemoveFileSourceRequest) GetID() *uuidpb.UUID { - if m != nil { - return m.ID - } - return nil -} - type ConfigUpdateRequest struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -1410,7 +1137,7 @@ type ConfigUpdateRequest struct { func (m *ConfigUpdateRequest) Reset() { *m = ConfigUpdateRequest{} } func (*ConfigUpdateRequest) ProtoMessage() {} func (*ConfigUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0046fd1b9991f89c, []int{20} + return fileDescriptor_0046fd1b9991f89c, []int{16} } func (m *ConfigUpdateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1461,7 +1188,7 @@ type MetricsMessage struct { func (m *MetricsMessage) Reset() { *m = MetricsMessage{} } func (*MetricsMessage) ProtoMessage() {} func (*MetricsMessage) Descriptor() ([]byte, []int) { - return 
fileDescriptor_0046fd1b9991f89c, []int{21} + return fileDescriptor_0046fd1b9991f89c, []int{17} } func (m *MetricsMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1507,7 +1234,6 @@ func (m *MetricsMessage) GetPodName() string { func init() { proto.RegisterType((*VizierMessage)(nil), "px.vizier.messages.VizierMessage") proto.RegisterType((*TracepointMessage)(nil), "px.vizier.messages.TracepointMessage") - proto.RegisterType((*FileSourceMessage)(nil), "px.vizier.messages.FileSourceMessage") proto.RegisterType((*ConfigUpdateMessage)(nil), "px.vizier.messages.ConfigUpdateMessage") proto.RegisterType((*K8SMetadataMessage)(nil), "px.vizier.messages.K8sMetadataMessage") proto.RegisterType((*RegisterAgentRequest)(nil), "px.vizier.messages.RegisterAgentRequest") @@ -1522,9 +1248,6 @@ func init() { proto.RegisterType((*RegisterTracepointRequest)(nil), "px.vizier.messages.RegisterTracepointRequest") proto.RegisterType((*TracepointInfoUpdate)(nil), "px.vizier.messages.TracepointInfoUpdate") proto.RegisterType((*RemoveTracepointRequest)(nil), "px.vizier.messages.RemoveTracepointRequest") - proto.RegisterType((*RegisterFileSourceRequest)(nil), "px.vizier.messages.RegisterFileSourceRequest") - proto.RegisterType((*FileSourceInfoUpdate)(nil), "px.vizier.messages.FileSourceInfoUpdate") - proto.RegisterType((*RemoveFileSourceRequest)(nil), "px.vizier.messages.RemoveFileSourceRequest") proto.RegisterType((*ConfigUpdateRequest)(nil), "px.vizier.messages.ConfigUpdateRequest") proto.RegisterType((*MetricsMessage)(nil), "px.vizier.messages.MetricsMessage") } @@ -1534,112 +1257,104 @@ func init() { } var fileDescriptor_0046fd1b9991f89c = []byte{ - // 1680 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4d, 0x53, 0x1b, 0xcd, - 0x11, 0xd6, 0x4a, 0x02, 0x44, 0xf3, 0x3d, 0x80, 0x11, 0x18, 0x0b, 0xa2, 0x94, 0x63, 0x6c, 0xc7, - 0xab, 0x04, 0x27, 0x31, 0x55, 0x29, 0xbb, 0x62, 0xa1, 0xc4, 0x40, 0x82, 0xcb, 0x5e, 
0xb0, 0x5d, - 0x45, 0x55, 0x6a, 0x33, 0xda, 0x1d, 0x89, 0x0d, 0xda, 0x0f, 0xcf, 0xae, 0x08, 0x72, 0x2e, 0x39, - 0xe6, 0x98, 0x43, 0x4e, 0xf9, 0x05, 0x39, 0xe7, 0x9e, 0x73, 0x72, 0xf4, 0x91, 0xaa, 0x54, 0x51, - 0xb1, 0x7c, 0x49, 0xe5, 0xbd, 0xf8, 0x27, 0xbc, 0x35, 0x1f, 0xfb, 0x21, 0x76, 0x05, 0xf8, 0xfa, - 0x9e, 0x98, 0xed, 0x79, 0xfa, 0xe9, 0x99, 0xa7, 0xa7, 0x67, 0x5a, 0xc0, 0x23, 0x9f, 0x1a, 0xb5, - 0x53, 0xeb, 0x83, 0x45, 0x68, 0xcd, 0x26, 0xbe, 0x8f, 0xdb, 0xc4, 0x8f, 0x06, 0x5e, 0x33, 0x1a, - 0xaa, 0x1e, 0x75, 0x03, 0x17, 0x21, 0xef, 0x4c, 0x15, 0x68, 0x35, 0x9c, 0x59, 0x59, 0x68, 0xbb, - 0x6d, 0x97, 0x4f, 0xd7, 0xd8, 0x48, 0x20, 0x57, 0xd6, 0x18, 0x31, 0xf6, 0xac, 0x9a, 0x98, 0xe9, - 0x76, 0x2d, 0xd3, 0x6b, 0xf2, 0x3f, 0x12, 0xf0, 0x84, 0x01, 0x0c, 0x4c, 0x1d, 0x37, 0xa8, 0x79, - 0x1d, 0xec, 0x38, 0x84, 0xd6, 0x4c, 0xcb, 0x0f, 0xa8, 0xd5, 0xec, 0x06, 0x84, 0x81, 0x13, 0x5f, - 0x3a, 0x43, 0x48, 0xc7, 0xa7, 0x59, 0x8e, 0x3d, 0x07, 0xdb, 0x96, 0xa1, 0x07, 0x14, 0x1b, 0x96, - 0xd3, 0xae, 0x59, 0xb4, 0xd6, 0x71, 0xdb, 0x96, 0x81, 0x3b, 0x5e, 0x33, 0x1c, 0x49, 0xf7, 0x5a, - 0x86, 0x7b, 0xcb, 0xea, 0x10, 0xdd, 0x77, 0xbb, 0xd4, 0x20, 0x09, 0x57, 0xe9, 0xb0, 0x7a, 0xc9, - 0xc1, 0x6b, 0xd6, 0x12, 0xab, 0xb9, 0xcb, 0x67, 0x5d, 0xdb, 0x76, 0x9d, 0x5a, 0x13, 0xfb, 0xa4, - 0xe6, 0x07, 0x38, 0xe8, 0x32, 0xe9, 0xc4, 0x40, 0xc2, 0x36, 0x18, 0xcc, 0x3f, 0xc6, 0x94, 0x98, - 0xb5, 0x93, 0x2d, 0x26, 0x71, 0x80, 0x4d, 0x1c, 0x60, 0x2e, 0xb1, 0x18, 0x4a, 0xe4, 0x8f, 0x12, - 0x19, 0xf1, 0x09, 0x3d, 0xb5, 0x0c, 0x12, 0xc3, 0x6b, 0x7e, 0xe0, 0x52, 0xc2, 0xc9, 0x5d, 0x4a, - 0xa4, 0x87, 0x9a, 0xe5, 0x21, 0x63, 0xe1, 0x36, 0x71, 0x02, 0xaf, 0x29, 0xfe, 0x0a, 0x7c, 0xf5, - 0xcf, 0x63, 0x30, 0xf5, 0x96, 0xc3, 0xf7, 0x45, 0x0e, 0xd1, 0xef, 0xe0, 0x16, 0x25, 0x6d, 0xcb, - 0x0f, 0x08, 0xd5, 0x39, 0x52, 0xa7, 0xe4, 0x7d, 0x97, 0xf8, 0x41, 0x59, 0x59, 0x57, 0x36, 0x26, - 0x36, 0x37, 0xd4, 0x74, 0xde, 0x55, 0x4d, 0x7a, 0x3c, 0x67, 0x0e, 0x9a, 0xc0, 0xef, 0xe4, 0xb4, - 0x05, 0x9a, 0x61, 0x47, 
0x06, 0x2c, 0xa5, 0x22, 0xf8, 0x9e, 0xeb, 0xf8, 0xa4, 0x9c, 0xe7, 0x21, - 0xee, 0xdf, 0x20, 0x84, 0x70, 0xd8, 0xc9, 0x69, 0x8b, 0x34, 0x6b, 0x02, 0x3d, 0x85, 0xf1, 0x63, - 0x82, 0x69, 0xd0, 0x24, 0x38, 0x28, 0x8f, 0x70, 0xda, 0x3b, 0x59, 0xb4, 0x3b, 0x21, 0x68, 0x27, - 0xa7, 0xc5, 0x1e, 0xe8, 0x05, 0x4c, 0x45, 0x1f, 0x3a, 0x36, 0x4e, 0xca, 0xa3, 0x9c, 0x62, 0xfd, - 0x4a, 0x8a, 0xe7, 0xc6, 0xc9, 0x4e, 0x4e, 0x9b, 0x3c, 0x4e, 0x7c, 0xa3, 0x3d, 0x98, 0x8e, 0x89, - 0x1c, 0xc6, 0x34, 0xc6, 0x99, 0xbe, 0x77, 0x25, 0xd3, 0x4b, 0xcc, 0xa9, 0xe2, 0x35, 0x30, 0x03, - 0xfa, 0x2d, 0x2c, 0x92, 0x33, 0x62, 0x74, 0x03, 0xa2, 0xbf, 0xef, 0x12, 0xda, 0x8b, 0x32, 0x53, - 0xe2, 0x94, 0xf7, 0xb2, 0x28, 0x7f, 0x29, 0x1c, 0x5e, 0x33, 0x7c, 0x9c, 0x98, 0x79, 0x92, 0x36, - 0xa3, 0xb7, 0x80, 0x58, 0xcd, 0x10, 0xcf, 0xb5, 0x9c, 0x40, 0x97, 0x0c, 0x65, 0xe0, 0xdc, 0x77, - 0xb3, 0xb8, 0x0f, 0x23, 0xb4, 0x3c, 0x3c, 0x3b, 0x39, 0x6d, 0x2e, 0xb8, 0x6c, 0x64, 0xcb, 0x36, - 0x5c, 0xa7, 0x65, 0xb5, 0xf5, 0xae, 0x67, 0xe2, 0x80, 0x44, 0xd4, 0x13, 0xc3, 0x97, 0xbd, 0xcd, - 0x1d, 0xde, 0x70, 0x7c, 0x4c, 0x3e, 0x6f, 0xa4, 0xcd, 0xe8, 0x08, 0x16, 0x4e, 0xb6, 0x7c, 0x3d, - 0x2c, 0x8b, 0x88, 0x7d, 0x92, 0xb3, 0xff, 0x20, 0x8b, 0xfd, 0xd7, 0x5b, 0xfe, 0xbe, 0x84, 0xc7, - 0xe4, 0xe8, 0x24, 0x65, 0x45, 0xef, 0x60, 0x3e, 0x71, 0x1f, 0x44, 0xd4, 0x53, 0xc3, 0x35, 0xf9, - 0x95, 0xd5, 0x21, 0x07, 0x1c, 0x9d, 0xd0, 0xa4, 0x75, 0xd9, 0x58, 0x1f, 0x81, 0x82, 0xed, 0xb7, - 0xf7, 0x8a, 0xa5, 0xc2, 0x6c, 0x71, 0xaf, 0x58, 0x2a, 0xce, 0x8e, 0x54, 0xcf, 0xf3, 0x30, 0x97, - 0x52, 0x94, 0x95, 0x63, 0x22, 0x29, 0x96, 0xd3, 0x72, 0xa5, 0x8a, 0x57, 0x95, 0x63, 0x4c, 0xb3, - 0xeb, 0xb4, 0x5c, 0x21, 0x17, 0x2b, 0xc7, 0x20, 0xc3, 0x8e, 0x2c, 0x58, 0xa6, 0xc4, 0x76, 0x4f, - 0x89, 0x9e, 0x08, 0x14, 0x9e, 0x2c, 0x51, 0x90, 0x0f, 0xb3, 0x0b, 0x92, 0x39, 0xc5, 0xa1, 0xe2, - 0xd3, 0xb5, 0x44, 0xb3, 0xa7, 0x90, 0x0b, 0xb7, 0xa3, 0xca, 0xcf, 0x08, 0x56, 0xe0, 0xc1, 0x1e, - 0x5d, 0x55, 0xfd, 0x59, 0xe1, 0x96, 0xe9, 0xb0, 0x49, 0x29, 
0x73, 0xf5, 0x3f, 0x79, 0x98, 0x4b, - 0x25, 0x06, 0x61, 0x58, 0x4a, 0x26, 0xf7, 0x86, 0xda, 0xc6, 0x3c, 0x83, 0xda, 0xb6, 0x32, 0xec, - 0xe8, 0xf7, 0xb0, 0x22, 0xb5, 0x4d, 0x46, 0xba, 0xb1, 0xb8, 0x71, 0xac, 0x94, 0xb8, 0xa9, 0x29, - 0xe4, 0xc1, 0x6a, 0x24, 0x6e, 0x56, 0xb4, 0x1b, 0xa8, 0x9b, 0x15, 0x2f, 0x52, 0x37, 0x35, 0x19, - 0xaa, 0xfb, 0x47, 0x98, 0xcf, 0x28, 0xd7, 0x74, 0xd9, 0x0f, 0xbe, 0x23, 0xd7, 0x96, 0x7d, 0xe2, - 0xb6, 0x32, 0xd2, 0xe6, 0x30, 0xf8, 0xff, 0xf3, 0x80, 0xd2, 0xe5, 0x8c, 0x8e, 0x60, 0x7e, 0xe0, - 0x52, 0x48, 0xe7, 0x55, 0x3c, 0x8a, 0xea, 0xc9, 0x96, 0xaf, 0xc6, 0x0f, 0xb0, 0xaa, 0x11, 0xa1, - 0x5a, 0x94, 0xd7, 0xb9, 0xc4, 0xad, 0x20, 0x93, 0x7a, 0x0a, 0xab, 0xb6, 0xe5, 0xfb, 0x96, 0xd3, - 0xd6, 0x07, 0x62, 0x0c, 0xa6, 0xf5, 0xf1, 0xf0, 0x20, 0xfb, 0xc2, 0x3b, 0xb1, 0xec, 0x84, 0xdc, - 0xf6, 0xb0, 0x49, 0xd4, 0x83, 0x3b, 0x43, 0xe2, 0xca, 0xd7, 0x53, 0x64, 0xf8, 0x27, 0x5f, 0x17, - 0x38, 0x7a, 0x48, 0x57, 0xec, 0xa1, 0xb3, 0xa1, 0xd8, 0x1f, 0x60, 0x21, 0xeb, 0xa5, 0x47, 0xcf, - 0xa0, 0xc8, 0xaa, 0x47, 0xca, 0xfb, 0x20, 0x91, 0xd9, 0xb0, 0x07, 0x09, 0x17, 0x24, 0x7a, 0x0f, - 0xee, 0xcc, 0xca, 0x44, 0xe3, 0x7e, 0x68, 0x15, 0x8a, 0xd8, 0xb7, 0x4c, 0xbe, 0x81, 0xa9, 0x7a, - 0xa9, 0x7f, 0xb1, 0x56, 0x7c, 0x7e, 0xb0, 0xdb, 0xd0, 0xb8, 0x75, 0xaf, 0x58, 0xca, 0xcf, 0x16, - 0xaa, 0x3f, 0x87, 0xc5, 0xcc, 0x16, 0x20, 0x72, 0x56, 0xae, 0x70, 0x36, 0x60, 0x8a, 0x3b, 0x35, - 0x70, 0x80, 0x59, 0x5c, 0xa4, 0xc1, 0x54, 0xa4, 0x5f, 0x62, 0xe9, 0xbc, 0x3a, 0x44, 0x7f, 0xa7, - 0xca, 0x86, 0x50, 0x1d, 0x68, 0x44, 0xd5, 0x50, 0x1a, 0xbe, 0xfa, 0x49, 0x3b, 0xf1, 0x55, 0xfd, - 0x26, 0x0f, 0x33, 0x3c, 0x8a, 0x38, 0x27, 0x3c, 0xce, 0x33, 0x18, 0xf5, 0x8d, 0x63, 0x62, 0xe3, - 0x72, 0x7e, 0xbd, 0x70, 0xe9, 0x39, 0x8a, 0xb4, 0x89, 0xba, 0xbe, 0x43, 0xdc, 0xec, 0x70, 0x3f, - 0x4d, 0x7a, 0xa1, 0xd7, 0x30, 0xe3, 0x51, 0xd7, 0x20, 0xbe, 0xaf, 0x1b, 0x94, 0xe0, 0x80, 0x98, - 0xe5, 0x22, 0x27, 0xba, 0xe2, 0x0c, 0xbf, 0x12, 0x0e, 0xdb, 0x02, 0xaf, 0x4d, 0x7b, 0x03, 0xdf, - 
0xe8, 0x08, 0x50, 0x48, 0x19, 0x10, 0x6a, 0x5b, 0x0e, 0x67, 0x1d, 0xe1, 0xac, 0x0f, 0xaf, 0x65, - 0x3d, 0x8c, 0x5c, 0xb4, 0x39, 0xef, 0xb2, 0x09, 0xfd, 0x10, 0x90, 0xe9, 0x12, 0x3f, 0xac, 0x78, - 0xb9, 0x75, 0xd6, 0x3b, 0x95, 0xb4, 0x59, 0x36, 0x23, 0xa4, 0x39, 0x10, 0x9b, 0xfb, 0x29, 0x14, - 0x19, 0xf9, 0x55, 0x1d, 0xd1, 0x40, 0xd6, 0x34, 0x0e, 0x17, 0x8f, 0x66, 0xf5, 0x5f, 0x0a, 0x8c, - 0x47, 0xfd, 0x12, 0x7a, 0x02, 0x25, 0xd1, 0x4a, 0xca, 0x83, 0x30, 0xb1, 0x39, 0xc3, 0xe8, 0xc4, - 0x4f, 0x0d, 0xf5, 0xcd, 0x9b, 0xdd, 0x46, 0x7d, 0xa2, 0x7f, 0xb1, 0x36, 0x26, 0x4e, 0x5e, 0x43, - 0x1b, 0xe3, 0xe8, 0x5d, 0x13, 0x21, 0x28, 0x06, 0x96, 0x2d, 0x3a, 0xcf, 0x82, 0xc6, 0xc7, 0xa8, - 0x01, 0x13, 0x72, 0x03, 0xfc, 0x68, 0x88, 0xb2, 0xfa, 0xfe, 0xd0, 0xe5, 0xc5, 0xe9, 0xd6, 0xa0, - 0x1b, 0xa7, 0xfe, 0x1e, 0xcc, 0xf8, 0xac, 0x3e, 0x1c, 0x83, 0xe8, 0x4e, 0xd7, 0x6e, 0x12, 0x5a, - 0x2e, 0xf2, 0x20, 0xd3, 0xa1, 0xf9, 0x25, 0xb7, 0x56, 0x7b, 0x80, 0x06, 0x6f, 0x18, 0xee, 0xbe, - 0x09, 0x93, 0xf2, 0x80, 0xe8, 0x86, 0x65, 0x52, 0xbe, 0xc0, 0xf1, 0xfa, 0x4c, 0xff, 0x62, 0x6d, - 0xe2, 0x40, 0xd8, 0xb7, 0x77, 0x1b, 0x9a, 0x36, 0x21, 0x41, 0xdb, 0x96, 0x49, 0xd1, 0x7d, 0x18, - 0xf7, 0x5c, 0x93, 0xe3, 0xfd, 0x72, 0x61, 0xbd, 0xb0, 0x31, 0x5e, 0x9f, 0xec, 0x5f, 0xac, 0x95, - 0x5e, 0xb9, 0x26, 0x03, 0xfb, 0x5a, 0xc9, 0x73, 0x4d, 0x86, 0xf4, 0xf7, 0x8a, 0x25, 0x65, 0x36, - 0x5f, 0xfd, 0xab, 0x02, 0x93, 0xc9, 0xf6, 0x35, 0x92, 0x43, 0x49, 0xc8, 0x91, 0xb1, 0x91, 0x7c, - 0xd6, 0x46, 0xd0, 0x8b, 0x2c, 0xdd, 0x32, 0x1b, 0xb0, 0xf4, 0x7e, 0x93, 0xd2, 0x55, 0x6b, 0x30, - 0x35, 0xd0, 0x0a, 0xa3, 0x0a, 0x00, 0x25, 0xe1, 0x43, 0xc4, 0x17, 0x57, 0xd2, 0x12, 0x96, 0xea, - 0xdf, 0x14, 0x98, 0xcf, 0xe8, 0x74, 0xd9, 0xb1, 0x10, 0x9d, 0xf2, 0x35, 0xc7, 0x82, 0x3b, 0xb1, - 0x63, 0xc1, 0xd1, 0xbb, 0x26, 0x7a, 0x00, 0x45, 0x56, 0xff, 0x72, 0x0f, 0xb7, 0x2e, 0x5d, 0x0b, - 0xac, 0x1c, 0x3a, 0xd8, 0xd1, 0x38, 0x06, 0x95, 0x61, 0x0c, 0x3b, 0xb8, 0xd3, 0xfb, 0x40, 0x78, - 0x82, 0x4b, 0x5a, 0xf8, 0x29, 0x2f, 
0x9f, 0x7f, 0x2a, 0xb0, 0x3c, 0xb4, 0x7f, 0x41, 0x7f, 0x80, - 0xc5, 0x44, 0x2b, 0x64, 0x12, 0xaf, 0xe3, 0xf6, 0x6c, 0xe2, 0x84, 0xcf, 0x64, 0x3d, 0xeb, 0x46, - 0x1a, 0xfc, 0x85, 0xab, 0x5a, 0x54, 0x0d, 0x7f, 0xa6, 0xc6, 0xfc, 0x8d, 0x88, 0x29, 0xd9, 0xf7, - 0xc5, 0x56, 0x74, 0x0f, 0xf2, 0x96, 0x29, 0x1f, 0xab, 0x94, 0x2a, 0xa3, 0xfd, 0x8b, 0xb5, 0xfc, - 0x6e, 0x43, 0xcb, 0x5b, 0x66, 0xf5, 0x5c, 0x81, 0x85, 0xac, 0x8e, 0x52, 0x32, 0x28, 0xd7, 0x32, - 0xa0, 0x1f, 0xc3, 0x08, 0xfb, 0x05, 0x2c, 0xaa, 0x6c, 0x7a, 0xf3, 0x36, 0xbf, 0x65, 0xe4, 0x6f, - 0x63, 0xf5, 0x37, 0x56, 0x8b, 0x6c, 0xf7, 0x8c, 0x0e, 0x39, 0x60, 0x10, 0x4d, 0x20, 0xd1, 0x43, - 0x18, 0x15, 0x08, 0x99, 0x82, 0xf9, 0x01, 0x9f, 0x03, 0x3e, 0xd0, 0x24, 0x64, 0xa0, 0xfa, 0x8b, - 0x5f, 0x51, 0xfd, 0xd5, 0x3a, 0x2c, 0x0d, 0x69, 0x63, 0x6f, 0xbc, 0xb9, 0xea, 0x3f, 0x12, 0xe9, - 0x4d, 0x77, 0x65, 0x1d, 0xb8, 0x95, 0x6c, 0xc6, 0x52, 0xf9, 0xfd, 0x59, 0x46, 0x7e, 0x13, 0x0e, - 0x2c, 0xb7, 0x31, 0x69, 0x32, 0xa7, 0xad, 0x0c, 0xeb, 0xd7, 0xe5, 0x34, 0xab, 0x93, 0xfd, 0x2e, - 0xe5, 0x34, 0x9d, 0x8c, 0x1b, 0xe7, 0xf4, 0xe9, 0x60, 0x4b, 0x1b, 0xfa, 0xcf, 0x42, 0xe1, 0x84, - 0xf4, 0x38, 0xc1, 0xb8, 0xc6, 0x86, 0x68, 0x01, 0x46, 0x4e, 0x71, 0xa7, 0x2b, 0x54, 0x18, 0xd7, - 0xc4, 0x47, 0xf5, 0x1d, 0x4c, 0xef, 0x93, 0x80, 0x5a, 0x86, 0x1f, 0xf6, 0xa3, 0x0f, 0x80, 0xbd, - 0x96, 0x36, 0x6b, 0xda, 0x98, 0x59, 0x0f, 0xc8, 0x59, 0x20, 0x79, 0xd8, 0x03, 0x6f, 0x4b, 0xf8, - 0x21, 0x39, 0x0b, 0xd0, 0x32, 0xb0, 0x6b, 0x5a, 0x77, 0xb0, 0x1d, 0xd2, 0x8e, 0x79, 0xae, 0xf9, - 0x12, 0xdb, 0xa4, 0xfe, 0x8b, 0x8f, 0x9f, 0x2a, 0xb9, 0xf3, 0x4f, 0x95, 0xdc, 0x97, 0x4f, 0x15, - 0xe5, 0x4f, 0xfd, 0x8a, 0xf2, 0xf7, 0x7e, 0x45, 0xf9, 0x77, 0xbf, 0xa2, 0x7c, 0xec, 0x57, 0x94, - 0xff, 0xf6, 0x2b, 0xca, 0xff, 0xfa, 0x95, 0xdc, 0x97, 0x7e, 0x45, 0xf9, 0xcb, 0xe7, 0x4a, 0xee, - 0xe3, 0xe7, 0x4a, 0xee, 0xfc, 0x73, 0x25, 0x77, 0x04, 0xf1, 0x3f, 0xf1, 0x9a, 0xa3, 0xfc, 0xff, - 0x3e, 0x8f, 0xbf, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xac, 0xf1, 0xdd, 
0xed, 0x13, 0x00, 0x00, + // 1544 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x4b, 0x6f, 0x1b, 0x47, + 0x12, 0xe6, 0x90, 0x94, 0x44, 0x95, 0xde, 0x2d, 0xc9, 0xa6, 0xfc, 0xa0, 0xb4, 0x5c, 0x78, 0x2d, + 0xdb, 0xeb, 0xe1, 0xae, 0xbc, 0x0b, 0x0b, 0x58, 0xd8, 0x58, 0x53, 0x5c, 0x58, 0xd2, 0x46, 0x86, + 0x3d, 0x92, 0x1d, 0x40, 0x40, 0x30, 0x69, 0xce, 0xb4, 0xa8, 0x81, 0x38, 0x0f, 0x77, 0x0f, 0x15, + 0xd1, 0xb9, 0xe4, 0x27, 0xe4, 0x90, 0x53, 0x7e, 0x41, 0x6e, 0xf9, 0x05, 0x39, 0x27, 0x47, 0x1f, + 0x75, 0x12, 0x62, 0xfa, 0x12, 0x24, 0x17, 0xff, 0x84, 0xa0, 0x1f, 0xf3, 0xa0, 0x38, 0x94, 0xec, + 0x93, 0x7a, 0xaa, 0xbf, 0xfa, 0xaa, 0xbb, 0x1e, 0x5d, 0x45, 0xc1, 0x7d, 0x46, 0xad, 0xda, 0xb1, + 0xf3, 0xc6, 0x21, 0xb4, 0xe6, 0x12, 0xc6, 0x70, 0x8b, 0xb0, 0x78, 0x11, 0x34, 0xe3, 0xa5, 0x1e, + 0x50, 0x3f, 0xf4, 0x11, 0x0a, 0x4e, 0x74, 0x89, 0xd6, 0xa3, 0x9d, 0x6b, 0x0b, 0x2d, 0xbf, 0xe5, + 0x8b, 0xed, 0x1a, 0x5f, 0x49, 0xe4, 0xb5, 0x65, 0x4e, 0x8c, 0x03, 0xa7, 0x26, 0x77, 0x3a, 0x1d, + 0xc7, 0x0e, 0x9a, 0xe2, 0x8f, 0x02, 0x3c, 0xe4, 0x00, 0x0b, 0x53, 0xcf, 0x0f, 0x6b, 0x41, 0x1b, + 0x7b, 0x1e, 0xa1, 0x35, 0xdb, 0x61, 0x21, 0x75, 0x9a, 0x9d, 0x90, 0x70, 0x70, 0xea, 0xcb, 0xe4, + 0x08, 0xa5, 0xf8, 0x28, 0x4b, 0xb1, 0xeb, 0x61, 0xd7, 0xb1, 0xcc, 0x90, 0x62, 0xcb, 0xf1, 0x5a, + 0x35, 0x87, 0xd6, 0xda, 0x7e, 0xcb, 0xb1, 0x70, 0x3b, 0x68, 0x46, 0x2b, 0xa5, 0x7e, 0xe3, 0x9c, + 0x7a, 0xd0, 0xac, 0xa5, 0xc8, 0x6f, 0x89, 0x5d, 0xdf, 0x75, 0x7d, 0xaf, 0xd6, 0xc4, 0x8c, 0xd4, + 0x58, 0x88, 0xc3, 0x0e, 0xf7, 0x84, 0x5c, 0x28, 0xd8, 0x2a, 0x87, 0xb1, 0x43, 0x4c, 0x89, 0x5d, + 0x3b, 0x5a, 0xe7, 0x1e, 0x0b, 0xb1, 0x8d, 0x43, 0x2c, 0x3c, 0x26, 0x97, 0x0a, 0xf9, 0x8f, 0x94, + 0x83, 0x19, 0xa1, 0xc7, 0x8e, 0x45, 0x12, 0x78, 0x8d, 0x85, 0x3e, 0x25, 0x82, 0xdc, 0xa7, 0x44, + 0x69, 0xe8, 0x59, 0x1a, 0xca, 0x16, 0x6e, 0x11, 0x2f, 0x0c, 0x9a, 0xf2, 0xaf, 0xc4, 0x57, 0x7f, + 0x1c, 0x85, 0xa9, 0x57, 0x02, 0xbe, 0x23, 0x43, 0x82, 0xbe, 
0x84, 0x2b, 0x94, 0xb4, 0x1c, 0x16, + 0x12, 0x6a, 0x0a, 0xa4, 0x49, 0xc9, 0xeb, 0x0e, 0x61, 0x61, 0x59, 0x5b, 0xd1, 0x56, 0x27, 0xd6, + 0x56, 0xf5, 0xc1, 0x30, 0xea, 0x86, 0xd2, 0x78, 0xc2, 0x15, 0x0c, 0x89, 0xdf, 0xcc, 0x19, 0x0b, + 0x34, 0x43, 0x8e, 0x2c, 0xb8, 0x3a, 0x60, 0x81, 0x05, 0xbe, 0xc7, 0x48, 0x39, 0x2f, 0x4c, 0xdc, + 0xf9, 0x08, 0x13, 0x52, 0x61, 0x33, 0x67, 0x2c, 0xd2, 0xac, 0x0d, 0xf4, 0x08, 0xc6, 0x0f, 0x09, + 0xa6, 0x61, 0x93, 0xe0, 0xb0, 0x3c, 0x22, 0x68, 0x6f, 0x66, 0xd1, 0x6e, 0x46, 0xa0, 0xcd, 0x9c, + 0x91, 0x68, 0xa0, 0xa7, 0x30, 0x15, 0x7f, 0x98, 0xd8, 0x3a, 0x2a, 0x8f, 0x0a, 0x8a, 0x95, 0x0b, + 0x29, 0x9e, 0x58, 0x47, 0x9b, 0x39, 0x63, 0xf2, 0x30, 0xf5, 0x8d, 0xb6, 0x61, 0x3a, 0x21, 0xf2, + 0x38, 0xd3, 0x98, 0x60, 0xfa, 0xcb, 0x85, 0x4c, 0xcf, 0xb0, 0xa0, 0x4a, 0xce, 0xc0, 0x05, 0xe8, + 0x0b, 0x58, 0x24, 0x27, 0xc4, 0xea, 0x84, 0xc4, 0x7c, 0xdd, 0x21, 0xb4, 0x1b, 0x47, 0xa6, 0x24, + 0x28, 0x6f, 0x67, 0x51, 0xfe, 0x4f, 0x2a, 0xbc, 0xe0, 0xf8, 0x24, 0x30, 0xf3, 0x64, 0x50, 0x8c, + 0x5e, 0x01, 0xe2, 0x25, 0x40, 0x02, 0xdf, 0xf1, 0x42, 0x53, 0x31, 0x94, 0x41, 0x70, 0xdf, 0xca, + 0xe2, 0xde, 0x8b, 0xd1, 0x2a, 0x79, 0x36, 0x73, 0xc6, 0x5c, 0x78, 0x5e, 0xc8, 0x8f, 0x6d, 0xf9, + 0xde, 0x81, 0xd3, 0x32, 0x3b, 0x81, 0x8d, 0x43, 0x12, 0x53, 0x4f, 0x0c, 0x3f, 0xf6, 0x86, 0x50, + 0x78, 0x29, 0xf0, 0x09, 0xf9, 0xbc, 0x35, 0x28, 0x46, 0xfb, 0xb0, 0x70, 0xb4, 0xce, 0xcc, 0xa8, + 0x2c, 0x62, 0xf6, 0x49, 0xc1, 0xfe, 0xb7, 0x2c, 0xf6, 0xff, 0xaf, 0xb3, 0x1d, 0x05, 0x4f, 0xc8, + 0xd1, 0xd1, 0x80, 0xb4, 0x3e, 0x02, 0x05, 0x97, 0xb5, 0xb6, 0x8b, 0xa5, 0xc2, 0x6c, 0x71, 0xbb, + 0x58, 0x2a, 0xce, 0x8e, 0x54, 0x4f, 0xf3, 0x30, 0x37, 0x70, 0x71, 0x5e, 0x35, 0x29, 0xdf, 0x39, + 0xde, 0x81, 0xaf, 0x2e, 0x7b, 0x51, 0xd5, 0x24, 0x34, 0x5b, 0xde, 0x81, 0x2f, 0x6f, 0xc5, 0xab, + 0x26, 0xcc, 0x90, 0x23, 0x07, 0x96, 0x28, 0x71, 0xfd, 0x63, 0x62, 0xa6, 0x0c, 0x45, 0x09, 0x20, + 0xeb, 0xe6, 0x5e, 0x76, 0xdd, 0x70, 0xa5, 0xc4, 0x54, 0x92, 0x04, 0x57, 0x69, 0xf6, 0x16, 0xf2, + 
0xe1, 0x7a, 0x5c, 0xa0, 0x19, 0xc6, 0x0a, 0xc2, 0xd8, 0xfd, 0x8b, 0x8a, 0x34, 0xcb, 0xdc, 0x12, + 0x1d, 0xb6, 0xa9, 0xdc, 0x5c, 0xfd, 0x1a, 0xe6, 0x33, 0xe2, 0x3e, 0x98, 0x3f, 0xfd, 0x0f, 0xd2, + 0xa5, 0xf9, 0x93, 0x4a, 0x7b, 0x6b, 0x50, 0x1c, 0x19, 0xff, 0x3d, 0x0f, 0x68, 0x30, 0x2f, 0xd0, + 0x3e, 0xcc, 0xf7, 0x65, 0xd7, 0x60, 0x54, 0xe5, 0xeb, 0xaa, 0x1f, 0xad, 0x33, 0x3d, 0x79, 0xc9, + 0x75, 0x83, 0x30, 0xbf, 0x43, 0x2d, 0x12, 0x47, 0x75, 0x2e, 0x95, 0x5e, 0x2a, 0xa4, 0xc7, 0x70, + 0xc3, 0x75, 0x18, 0x73, 0xbc, 0x96, 0xd9, 0x67, 0xa3, 0x3f, 0xaa, 0x0f, 0x86, 0x1b, 0xd9, 0x91, + 0xda, 0xa9, 0x63, 0xa7, 0xdc, 0xed, 0x0e, 0xdb, 0x44, 0x5d, 0xb8, 0x39, 0xc4, 0xae, 0x7a, 0x86, + 0x65, 0x84, 0xff, 0xf5, 0x69, 0x86, 0xe3, 0x17, 0xf9, 0x9a, 0x3b, 0x74, 0x37, 0x72, 0xf6, 0x1b, + 0x58, 0xc8, 0x6a, 0x19, 0xe8, 0x31, 0x14, 0x79, 0xed, 0x28, 0xf7, 0xde, 0x4d, 0x45, 0x36, 0x6a, + 0x66, 0xd1, 0x81, 0x64, 0x13, 0x13, 0xca, 0xbc, 0x48, 0x0c, 0xa1, 0x87, 0x6e, 0x40, 0x11, 0x33, + 0xc7, 0x16, 0x17, 0x98, 0xaa, 0x97, 0x7a, 0x67, 0xcb, 0xc5, 0x27, 0xbb, 0x5b, 0x0d, 0x43, 0x48, + 0xb7, 0x8b, 0xa5, 0xfc, 0x6c, 0xa1, 0xfa, 0x1f, 0x58, 0xcc, 0xec, 0x25, 0xb1, 0xb2, 0x76, 0x81, + 0xb2, 0x05, 0x53, 0x42, 0xa9, 0x81, 0x43, 0xcc, 0xed, 0x22, 0x03, 0xa6, 0x62, 0xff, 0xa5, 0x8e, + 0x2e, 0xaa, 0x43, 0x0e, 0x0a, 0xba, 0x9a, 0x33, 0xf4, 0xbe, 0x01, 0x45, 0x8f, 0x5c, 0x23, 0x4e, + 0x3f, 0xe9, 0xa6, 0xbe, 0xaa, 0x7f, 0xe4, 0x61, 0x46, 0x58, 0x91, 0x79, 0x22, 0xec, 0x3c, 0x86, + 0x51, 0x66, 0x1d, 0x12, 0x17, 0x97, 0xf3, 0x2b, 0x85, 0x73, 0xef, 0x5a, 0xec, 0x9b, 0x78, 0x7c, + 0xd8, 0xc3, 0xcd, 0xb6, 0xd0, 0x33, 0x94, 0x16, 0x7a, 0x01, 0x33, 0x01, 0xf5, 0x2d, 0xc2, 0x98, + 0x69, 0x51, 0x82, 0x43, 0x62, 0x97, 0x8b, 0x82, 0xe8, 0x82, 0x1c, 0x7e, 0x2e, 0x15, 0x36, 0x24, + 0xde, 0x98, 0x0e, 0xfa, 0xbe, 0xd1, 0x3e, 0xa0, 0x88, 0x32, 0x24, 0xd4, 0x75, 0x3c, 0xc1, 0x3a, + 0x22, 0x58, 0xef, 0x5d, 0xca, 0xba, 0x17, 0xab, 0x18, 0x73, 0xc1, 0x79, 0x11, 0xfa, 0x3b, 0x20, + 0xdb, 0x27, 0x2c, 0xaa, 0x78, 0x75, 
0x75, 0xde, 0x84, 0x4b, 0xc6, 0x2c, 0xdf, 0x91, 0xae, 0xd9, + 0x95, 0x97, 0xfb, 0x37, 0x14, 0x39, 0xf9, 0x45, 0xad, 0xb5, 0x2f, 0x6a, 0x86, 0x80, 0xcb, 0x67, + 0xbd, 0xfa, 0xb3, 0x06, 0xe3, 0x71, 0xe3, 0x45, 0x0f, 0xa1, 0x24, 0x67, 0x12, 0x95, 0x08, 0x13, + 0x6b, 0x33, 0x9c, 0x4e, 0x8e, 0xa0, 0xfa, 0xcb, 0x97, 0x5b, 0x8d, 0xfa, 0x44, 0xef, 0x6c, 0x79, + 0x4c, 0x66, 0x5e, 0xc3, 0x18, 0x13, 0xe8, 0x2d, 0x1b, 0x21, 0x28, 0x86, 0x8e, 0x2b, 0x47, 0x98, + 0x82, 0x21, 0xd6, 0xa8, 0x01, 0x13, 0xea, 0x02, 0x22, 0x35, 0x64, 0x59, 0xfd, 0x75, 0xe8, 0xf1, + 0x92, 0x70, 0x1b, 0xd0, 0x49, 0x42, 0x7f, 0x1b, 0x66, 0x18, 0xaf, 0x0f, 0xcf, 0x22, 0xa6, 0xd7, + 0x71, 0x9b, 0x84, 0x96, 0x8b, 0xc2, 0xc8, 0x74, 0x24, 0x7e, 0x26, 0xa4, 0xd5, 0x2e, 0xa0, 0xfe, + 0x17, 0x46, 0xa8, 0xaf, 0xc1, 0xa4, 0x4a, 0x10, 0xd3, 0x72, 0x6c, 0x2a, 0x0e, 0x38, 0x5e, 0x9f, + 0xe9, 0x9d, 0x2d, 0x4f, 0xec, 0x4a, 0xf9, 0xc6, 0x56, 0xc3, 0x30, 0x26, 0x14, 0x68, 0xc3, 0xb1, + 0x29, 0xba, 0x03, 0xe3, 0x81, 0x6f, 0x0b, 0x3c, 0x2b, 0x17, 0x56, 0x0a, 0xab, 0xe3, 0xf5, 0xc9, + 0xde, 0xd9, 0x72, 0xe9, 0xb9, 0x6f, 0x73, 0x30, 0x33, 0x4a, 0x81, 0x6f, 0x73, 0x24, 0xdb, 0x2e, + 0x96, 0xb4, 0xd9, 0x7c, 0xf5, 0x3b, 0x0d, 0x26, 0xd3, 0x73, 0x50, 0xec, 0x0e, 0x2d, 0xe5, 0x8e, + 0x8c, 0x8b, 0xe4, 0xb3, 0x2e, 0x82, 0x9e, 0x66, 0xf9, 0x2d, 0xb3, 0x93, 0x0f, 0xde, 0x37, 0xed, + 0xba, 0x6a, 0x0d, 0xa6, 0xfa, 0x66, 0x2a, 0x54, 0x01, 0xa0, 0x24, 0x6a, 0x44, 0xe2, 0x70, 0x25, + 0x23, 0x25, 0xa9, 0x7e, 0xaf, 0xc1, 0x7c, 0xc6, 0xc8, 0xc4, 0xd3, 0x42, 0x8e, 0x5c, 0x97, 0xa4, + 0x85, 0x50, 0xe2, 0x69, 0x21, 0xd0, 0x5b, 0x36, 0xba, 0x0b, 0x45, 0x5e, 0xff, 0xea, 0x0e, 0x57, + 0xce, 0x3d, 0x0b, 0xbc, 0x1c, 0xda, 0xd8, 0x33, 0x04, 0x06, 0x95, 0x61, 0x0c, 0x7b, 0xb8, 0xdd, + 0x7d, 0x43, 0x44, 0x80, 0x4b, 0x46, 0xf4, 0xa9, 0x1e, 0x9f, 0x9f, 0x34, 0x58, 0x1a, 0xda, 0x61, + 0xd1, 0x57, 0xb0, 0x98, 0x6a, 0xd6, 0x36, 0x09, 0xda, 0x7e, 0xd7, 0x25, 0x5e, 0xd4, 0x26, 0xeb, + 0x59, 0x2f, 0x52, 0xff, 0x2f, 0x1f, 0xdd, 0xa1, 0x7a, 0xf4, 0x7b, 0x27, 
0xe1, 0x6f, 0xc4, 0x4c, + 0xe9, 0xc9, 0x24, 0x91, 0xa2, 0xdb, 0x90, 0x77, 0x6c, 0xd5, 0xac, 0x06, 0xbc, 0x32, 0xda, 0x3b, + 0x5b, 0xce, 0x6f, 0x35, 0x8c, 0xbc, 0x63, 0x57, 0x4f, 0x35, 0x58, 0xc8, 0x9a, 0x79, 0x14, 0x83, + 0x76, 0x29, 0x03, 0xfa, 0x27, 0x8c, 0xf0, 0x9f, 0x52, 0xb2, 0xca, 0xa6, 0xd7, 0xae, 0x8b, 0x57, + 0x46, 0xfd, 0xc8, 0xd2, 0x3f, 0x73, 0x0e, 0xc8, 0x46, 0xd7, 0x6a, 0x93, 0x5d, 0x0e, 0x31, 0x24, + 0x12, 0xdd, 0x83, 0x51, 0x89, 0x50, 0x21, 0x98, 0xef, 0xd3, 0xd9, 0x15, 0x0b, 0x43, 0x41, 0xfa, + 0xaa, 0xbf, 0xf8, 0x09, 0xd5, 0x5f, 0xad, 0xc3, 0xd5, 0x21, 0x83, 0xd6, 0x47, 0x5f, 0xae, 0xfa, + 0xa8, 0x7f, 0xfc, 0x89, 0xf4, 0x67, 0xa1, 0x70, 0x44, 0xba, 0x82, 0x60, 0xdc, 0xe0, 0x4b, 0xb4, + 0x00, 0x23, 0xc7, 0xb8, 0xdd, 0x91, 0x5e, 0x18, 0x37, 0xe4, 0x47, 0xf5, 0x73, 0x98, 0xde, 0x21, + 0x21, 0x75, 0x2c, 0x16, 0xcd, 0x2e, 0x77, 0x81, 0xbf, 0xac, 0x2e, 0x6f, 0xf0, 0x5c, 0x6c, 0x86, + 0xe4, 0x24, 0x54, 0x3c, 0xbc, 0x19, 0xb8, 0x0a, 0xbe, 0x47, 0x4e, 0x42, 0xb4, 0x04, 0xbc, 0xa4, + 0x4d, 0x0f, 0xbb, 0x11, 0xed, 0x58, 0xe0, 0xdb, 0xcf, 0xb0, 0x4b, 0xea, 0xff, 0x7d, 0xfb, 0xae, + 0x92, 0x3b, 0x7d, 0x57, 0xc9, 0x7d, 0x78, 0x57, 0xd1, 0xbe, 0xe9, 0x55, 0xb4, 0x1f, 0x7a, 0x15, + 0xed, 0x97, 0x5e, 0x45, 0x7b, 0xdb, 0xab, 0x68, 0xbf, 0xf6, 0x2a, 0xda, 0x6f, 0xbd, 0x4a, 0xee, + 0x43, 0xaf, 0xa2, 0x7d, 0xfb, 0xbe, 0x92, 0x7b, 0xfb, 0xbe, 0x92, 0x3b, 0x7d, 0x5f, 0xc9, 0xed, + 0x43, 0xf2, 0x8f, 0x80, 0xe6, 0xa8, 0xf8, 0xb1, 0xf9, 0xe0, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x5c, 0x2e, 0x01, 0x20, 0x31, 0x10, 0x00, 0x00, } func (this *VizierMessage) Equal(that interface{}) bool { @@ -1888,30 +1603,6 @@ func (this *VizierMessage_K8SMetadataMessage) Equal(that interface{}) bool { } return true } -func (this *VizierMessage_FileSourceMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*VizierMessage_FileSourceMessage) - if !ok { - that2, ok := that.(VizierMessage_FileSourceMessage) - if ok { - that1 = &that2 - } else 
{ - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.FileSourceMessage.Equal(that1.FileSourceMessage) { - return false - } - return true -} func (this *TracepointMessage) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2014,14 +1705,14 @@ func (this *TracepointMessage_RegisterTracepointRequest) Equal(that interface{}) } return true } -func (this *FileSourceMessage) Equal(that interface{}) bool { +func (this *ConfigUpdateMessage) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*FileSourceMessage) + that1, ok := that.(*ConfigUpdateMessage) if !ok { - that2, ok := that.(FileSourceMessage) + that2, ok := that.(ConfigUpdateMessage) if ok { that1 = &that2 } else { @@ -2044,14 +1735,14 @@ func (this *FileSourceMessage) Equal(that interface{}) bool { } return true } -func (this *FileSourceMessage_FileSourceInfoUpdate) Equal(that interface{}) bool { +func (this *ConfigUpdateMessage_ConfigUpdateRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*FileSourceMessage_FileSourceInfoUpdate) + that1, ok := that.(*ConfigUpdateMessage_ConfigUpdateRequest) if !ok { - that2, ok := that.(FileSourceMessage_FileSourceInfoUpdate) + that2, ok := that.(ConfigUpdateMessage_ConfigUpdateRequest) if ok { that1 = &that2 } else { @@ -2063,19 +1754,19 @@ func (this *FileSourceMessage_FileSourceInfoUpdate) Equal(that interface{}) bool } else if this == nil { return false } - if !this.FileSourceInfoUpdate.Equal(that1.FileSourceInfoUpdate) { + if !this.ConfigUpdateRequest.Equal(that1.ConfigUpdateRequest) { return false } return true } -func (this *FileSourceMessage_RemoveFileSourceRequest) Equal(that interface{}) bool { +func (this *K8SMetadataMessage) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*FileSourceMessage_RemoveFileSourceRequest) + that1, ok := 
that.(*K8SMetadataMessage) if !ok { - that2, ok := that.(FileSourceMessage_RemoveFileSourceRequest) + that2, ok := that.(K8SMetadataMessage) if ok { that1 = &that2 } else { @@ -2087,19 +1778,25 @@ func (this *FileSourceMessage_RemoveFileSourceRequest) Equal(that interface{}) b } else if this == nil { return false } - if !this.RemoveFileSourceRequest.Equal(that1.RemoveFileSourceRequest) { + if that1.Msg == nil { + if this.Msg != nil { + return false + } + } else if this.Msg == nil { + return false + } else if !this.Msg.Equal(that1.Msg) { return false } return true } -func (this *FileSourceMessage_RegisterFileSourceRequest) Equal(that interface{}) bool { +func (this *K8SMetadataMessage_K8SMetadataUpdate) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*FileSourceMessage_RegisterFileSourceRequest) + that1, ok := that.(*K8SMetadataMessage_K8SMetadataUpdate) if !ok { - that2, ok := that.(FileSourceMessage_RegisterFileSourceRequest) + that2, ok := that.(K8SMetadataMessage_K8SMetadataUpdate) if ok { that1 = &that2 } else { @@ -2111,19 +1808,19 @@ func (this *FileSourceMessage_RegisterFileSourceRequest) Equal(that interface{}) } else if this == nil { return false } - if !this.RegisterFileSourceRequest.Equal(that1.RegisterFileSourceRequest) { + if !this.K8SMetadataUpdate.Equal(that1.K8SMetadataUpdate) { return false } return true } -func (this *ConfigUpdateMessage) Equal(that interface{}) bool { +func (this *K8SMetadataMessage_MissingK8SMetadataRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*ConfigUpdateMessage) + that1, ok := that.(*K8SMetadataMessage_MissingK8SMetadataRequest) if !ok { - that2, ok := that.(ConfigUpdateMessage) + that2, ok := that.(K8SMetadataMessage_MissingK8SMetadataRequest) if ok { that1 = &that2 } else { @@ -2135,115 +1832,7 @@ func (this *ConfigUpdateMessage) Equal(that interface{}) bool { } else if this == nil { return false } - if that1.Msg == nil { - if 
this.Msg != nil { - return false - } - } else if this.Msg == nil { - return false - } else if !this.Msg.Equal(that1.Msg) { - return false - } - return true -} -func (this *ConfigUpdateMessage_ConfigUpdateRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ConfigUpdateMessage_ConfigUpdateRequest) - if !ok { - that2, ok := that.(ConfigUpdateMessage_ConfigUpdateRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ConfigUpdateRequest.Equal(that1.ConfigUpdateRequest) { - return false - } - return true -} -func (this *K8SMetadataMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*K8SMetadataMessage) - if !ok { - that2, ok := that.(K8SMetadataMessage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if that1.Msg == nil { - if this.Msg != nil { - return false - } - } else if this.Msg == nil { - return false - } else if !this.Msg.Equal(that1.Msg) { - return false - } - return true -} -func (this *K8SMetadataMessage_K8SMetadataUpdate) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*K8SMetadataMessage_K8SMetadataUpdate) - if !ok { - that2, ok := that.(K8SMetadataMessage_K8SMetadataUpdate) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.K8SMetadataUpdate.Equal(that1.K8SMetadataUpdate) { - return false - } - return true -} -func (this *K8SMetadataMessage_MissingK8SMetadataRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*K8SMetadataMessage_MissingK8SMetadataRequest) - if !ok { - that2, ok := that.(K8SMetadataMessage_MissingK8SMetadataRequest) 
- if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.MissingK8SMetadataRequest.Equal(that1.MissingK8SMetadataRequest) { + if !this.MissingK8SMetadataRequest.Equal(that1.MissingK8SMetadataRequest) { return false } return true @@ -2631,90 +2220,6 @@ func (this *RemoveTracepointRequest) Equal(that interface{}) bool { } return true } -func (this *RegisterFileSourceRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RegisterFileSourceRequest) - if !ok { - that2, ok := that.(RegisterFileSourceRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.FileSourceDeployment.Equal(that1.FileSourceDeployment) { - return false - } - if !this.ID.Equal(that1.ID) { - return false - } - return true -} -func (this *FileSourceInfoUpdate) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FileSourceInfoUpdate) - if !ok { - that2, ok := that.(FileSourceInfoUpdate) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ID.Equal(that1.ID) { - return false - } - if this.State != that1.State { - return false - } - if !this.Status.Equal(that1.Status) { - return false - } - if !this.AgentID.Equal(that1.AgentID) { - return false - } - return true -} -func (this *RemoveFileSourceRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RemoveFileSourceRequest) - if !ok { - that2, ok := that.(RemoveFileSourceRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ID.Equal(that1.ID) { - return false - } - return true -} func 
(this *ConfigUpdateRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -2773,7 +2278,7 @@ func (this *VizierMessage) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 14) + s := make([]string, 0, 13) s = append(s, "&messagespb.VizierMessage{") if this.Msg != nil { s = append(s, "Msg: "+fmt.Sprintf("%#v", this.Msg)+",\n") @@ -2853,14 +2358,6 @@ func (this *VizierMessage_K8SMetadataMessage) GoString() string { `K8SMetadataMessage:` + fmt.Sprintf("%#v", this.K8SMetadataMessage) + `}`}, ", ") return s } -func (this *VizierMessage_FileSourceMessage) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&messagespb.VizierMessage_FileSourceMessage{` + - `FileSourceMessage:` + fmt.Sprintf("%#v", this.FileSourceMessage) + `}`}, ", ") - return s -} func (this *TracepointMessage) GoString() string { if this == nil { return "nil" @@ -2897,42 +2394,6 @@ func (this *TracepointMessage_RegisterTracepointRequest) GoString() string { `RegisterTracepointRequest:` + fmt.Sprintf("%#v", this.RegisterTracepointRequest) + `}`}, ", ") return s } -func (this *FileSourceMessage) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&messagespb.FileSourceMessage{") - if this.Msg != nil { - s = append(s, "Msg: "+fmt.Sprintf("%#v", this.Msg)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileSourceMessage_FileSourceInfoUpdate) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&messagespb.FileSourceMessage_FileSourceInfoUpdate{` + - `FileSourceInfoUpdate:` + fmt.Sprintf("%#v", this.FileSourceInfoUpdate) + `}`}, ", ") - return s -} -func (this *FileSourceMessage_RemoveFileSourceRequest) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&messagespb.FileSourceMessage_RemoveFileSourceRequest{` + - `RemoveFileSourceRequest:` + fmt.Sprintf("%#v", 
this.RemoveFileSourceRequest) + `}`}, ", ") - return s -} -func (this *FileSourceMessage_RegisterFileSourceRequest) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&messagespb.FileSourceMessage_RegisterFileSourceRequest{` + - `RegisterFileSourceRequest:` + fmt.Sprintf("%#v", this.RegisterFileSourceRequest) + `}`}, ", ") - return s -} func (this *ConfigUpdateMessage) GoString() string { if this == nil { return "nil" @@ -3160,52 +2621,6 @@ func (this *RemoveTracepointRequest) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *RegisterFileSourceRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&messagespb.RegisterFileSourceRequest{") - if this.FileSourceDeployment != nil { - s = append(s, "FileSourceDeployment: "+fmt.Sprintf("%#v", this.FileSourceDeployment)+",\n") - } - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileSourceInfoUpdate) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&messagespb.FileSourceInfoUpdate{") - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - } - if this.AgentID != nil { - s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RemoveFileSourceRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&messagespb.RemoveFileSourceRequest{") - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} func (this *ConfigUpdateRequest) GoString() string { if this == 
nil { return "nil" @@ -3457,43 +2872,22 @@ func (m *VizierMessage_K8SMetadataMessage) MarshalToSizedBuffer(dAtA []byte) (in } return len(dAtA) - i, nil } -func (m *VizierMessage_FileSourceMessage) MarshalTo(dAtA []byte) (int, error) { +func (m *TracepointMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracepointMessage) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VizierMessage_FileSourceMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.FileSourceMessage != nil { - { - size, err := m.FileSourceMessage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - return len(dAtA) - i, nil -} -func (m *TracepointMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TracepointMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TracepointMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TracepointMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3573,101 +2967,6 @@ func (m *TracepointMessage_RegisterTracepointRequest) MarshalToSizedBuffer(dAtA } return len(dAtA) - i, nil } -func (m *FileSourceMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileSourceMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileSourceMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Msg != nil { - { - size := m.Msg.Size() - i -= size - if _, err := m.Msg.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *FileSourceMessage_FileSourceInfoUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileSourceMessage_FileSourceInfoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.FileSourceInfoUpdate != nil { - { - size, err := m.FileSourceInfoUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *FileSourceMessage_RemoveFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileSourceMessage_RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RemoveFileSourceRequest != nil { - { - size, err := m.RemoveFileSourceRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *FileSourceMessage_RegisterFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileSourceMessage_RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RegisterFileSourceRequest != nil { - { - size, err := m.RegisterFileSourceRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, 
nil -} func (m *ConfigUpdateMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4383,152 +3682,6 @@ func (m *RemoveTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RegisterFileSourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RegisterFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.FileSourceDeployment != nil { - { - size, err := m.FileSourceDeployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FileSourceInfoUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileSourceInfoUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileSourceInfoUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AgentID != nil { - { - size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Status != nil { - { - size, err := 
m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintMessages(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemoveFileSourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMessages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *ConfigUpdateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4734,18 +3887,6 @@ func (m *VizierMessage_K8SMetadataMessage) Size() (n int) { } return n } -func (m *VizierMessage_FileSourceMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FileSourceMessage != nil { - l = m.FileSourceMessage.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} func (m *TracepointMessage) Size() (n int) { if m == nil { return 0 @@ -4794,7 +3935,7 @@ func (m *TracepointMessage_RegisterTracepointRequest) Size() (n int) { } return n } -func (m *FileSourceMessage) Size() (n int) { +func (m *ConfigUpdateMessage) Size() (n int) { if m == nil { return 0 } @@ -4806,62 
+3947,14 @@ func (m *FileSourceMessage) Size() (n int) { return n } -func (m *FileSourceMessage_FileSourceInfoUpdate) Size() (n int) { +func (m *ConfigUpdateMessage_ConfigUpdateRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.FileSourceInfoUpdate != nil { - l = m.FileSourceInfoUpdate.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} -func (m *FileSourceMessage_RemoveFileSourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RemoveFileSourceRequest != nil { - l = m.RemoveFileSourceRequest.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} -func (m *FileSourceMessage_RegisterFileSourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RegisterFileSourceRequest != nil { - l = m.RegisterFileSourceRequest.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} -func (m *ConfigUpdateMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Msg != nil { - n += m.Msg.Size() - } - return n -} - -func (m *ConfigUpdateMessage_ConfigUpdateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConfigUpdateRequest != nil { - l = m.ConfigUpdateRequest.Size() + if m.ConfigUpdateRequest != nil { + l = m.ConfigUpdateRequest.Size() n += 1 + l + sovMessages(uint64(l)) } return n @@ -5136,60 +4229,6 @@ func (m *RemoveTracepointRequest) Size() (n int) { return n } -func (m *RegisterFileSourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FileSourceDeployment != nil { - l = m.FileSourceDeployment.Size() - n += 1 + l + sovMessages(uint64(l)) - } - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} - -func (m *FileSourceInfoUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovMessages(uint64(l)) - } - if m.State != 0 { - n += 1 + sovMessages(uint64(m.State)) - 
} - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovMessages(uint64(l)) - } - if m.AgentID != nil { - l = m.AgentID.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} - -func (m *RemoveFileSourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovMessages(uint64(l)) - } - return n -} - func (m *ConfigUpdateRequest) Size() (n int) { if m == nil { return 0 @@ -5330,16 +4369,6 @@ func (this *VizierMessage_K8SMetadataMessage) String() string { }, "") return s } -func (this *VizierMessage_FileSourceMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VizierMessage_FileSourceMessage{`, - `FileSourceMessage:` + strings.Replace(fmt.Sprintf("%v", this.FileSourceMessage), "FileSourceMessage", "FileSourceMessage", 1) + `,`, - `}`, - }, "") - return s -} func (this *TracepointMessage) String() string { if this == nil { return "nil" @@ -5380,46 +4409,6 @@ func (this *TracepointMessage_RegisterTracepointRequest) String() string { }, "") return s } -func (this *FileSourceMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceMessage{`, - `Msg:` + fmt.Sprintf("%v", this.Msg) + `,`, - `}`, - }, "") - return s -} -func (this *FileSourceMessage_FileSourceInfoUpdate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceMessage_FileSourceInfoUpdate{`, - `FileSourceInfoUpdate:` + strings.Replace(fmt.Sprintf("%v", this.FileSourceInfoUpdate), "FileSourceInfoUpdate", "FileSourceInfoUpdate", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FileSourceMessage_RemoveFileSourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceMessage_RemoveFileSourceRequest{`, - `RemoveFileSourceRequest:` + strings.Replace(fmt.Sprintf("%v", this.RemoveFileSourceRequest), "RemoveFileSourceRequest", 
"RemoveFileSourceRequest", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FileSourceMessage_RegisterFileSourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceMessage_RegisterFileSourceRequest{`, - `RegisterFileSourceRequest:` + strings.Replace(fmt.Sprintf("%v", this.RegisterFileSourceRequest), "RegisterFileSourceRequest", "RegisterFileSourceRequest", 1) + `,`, - `}`, - }, "") - return s -} func (this *ConfigUpdateMessage) String() string { if this == nil { return "nil" @@ -5632,40 +4621,6 @@ func (this *RemoveTracepointRequest) String() string { }, "") return s } -func (this *RegisterFileSourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegisterFileSourceRequest{`, - `FileSourceDeployment:` + strings.Replace(fmt.Sprintf("%v", this.FileSourceDeployment), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + `,`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FileSourceInfoUpdate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceInfoUpdate{`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "UUID", "uuidpb.UUID", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveFileSourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveFileSourceRequest{`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `}`, - }, "") - return s -} func (this *ConfigUpdateRequest) String() string { if this == nil { return "nil" @@ -6040,41 +4995,6 @@ func (m *VizierMessage) Unmarshal(dAtA []byte) error { } 
m.Msg = &VizierMessage_K8SMetadataMessage{v} iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSourceMessage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &FileSourceMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Msg = &VizierMessage_FileSourceMessage{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMessages(dAtA[iNdEx:]) @@ -6251,7 +5171,7 @@ func (m *TracepointMessage) Unmarshal(dAtA []byte) error { } return nil } -func (m *FileSourceMessage) Unmarshal(dAtA []byte) error { +func (m *ConfigUpdateMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6274,15 +5194,15 @@ func (m *FileSourceMessage) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FileSourceMessage: wiretype end group for non-group") + return fmt.Errorf("proto: ConfigUpdateMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FileSourceMessage: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfigUpdateMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSourceInfoUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigUpdateRequest", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6309,15 +5229,65 @@ func (m 
*FileSourceMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &FileSourceInfoUpdate{} + v := &ConfigUpdateRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Msg = &FileSourceMessage_FileSourceInfoUpdate{v} + m.Msg = &ConfigUpdateMessage_ConfigUpdateRequest{v} iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *K8SMetadataMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: K8sMetadataMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: K8sMetadataMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RemoveFileSourceRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field K8SMetadataUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6344,15 +5314,15 @@ func (m *FileSourceMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RemoveFileSourceRequest{} + v := &metadatapb.ResourceUpdate{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Msg = 
&FileSourceMessage_RemoveFileSourceRequest{v} + m.Msg = &K8SMetadataMessage_K8SMetadataUpdate{v} iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RegisterFileSourceRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MissingK8SMetadataRequest", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6379,216 +5349,11 @@ func (m *FileSourceMessage) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RegisterFileSourceRequest{} + v := &metadatapb.MissingK8SMetadataRequest{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Msg = &FileSourceMessage_RegisterFileSourceRequest{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigUpdateMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigUpdateMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigUpdateMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigUpdateRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ConfigUpdateRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Msg = &ConfigUpdateMessage_ConfigUpdateRequest{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *K8SMetadataMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: K8sMetadataMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: K8sMetadataMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field K8SMetadataUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &metadatapb.ResourceUpdate{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Msg = &K8SMetadataMessage_K8SMetadataUpdate{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MissingK8SMetadataRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &metadatapb.MissingK8SMetadataRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Msg = &K8SMetadataMessage_MissingK8SMetadataRequest{v} + m.Msg = &K8SMetadataMessage_MissingK8SMetadataRequest{v} iNdEx = postIndex case 3: if wireType != 2 { @@ -8109,391 +6874,6 @@ func (m *RemoveTracepointRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *RegisterFileSourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegisterFileSourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
RegisterFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSourceDeployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FileSourceDeployment == nil { - m.FileSourceDeployment = &ir.FileSourceDeployment{} - } - if err := m.FileSourceDeployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileSourceInfoUpdate) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileSourceInfoUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileSourceInfoUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= statuspb.LifeCycleState(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &statuspb.Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AgentID == nil { - m.AgentID = &uuidpb.UUID{} - } - if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := 
int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveFileSourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ConfigUpdateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/src/vizier/messages/messagespb/messages.proto b/src/vizier/messages/messagespb/messages.proto index 32e61d92dba..92bc4785084 100644 --- a/src/vizier/messages/messagespb/messages.proto +++ b/src/vizier/messages/messagespb/messages.proto @@ -26,7 +26,6 @@ import "gogoproto/gogo.proto"; import "src/api/proto/uuidpb/uuid.proto"; import "src/carnot/planner/distributedpb/distributed_plan.proto"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; -import "src/carnot/planner/file_source/ir/logical.proto"; import "src/carnot/planpb/plan.proto"; import 
"src/common/base/statuspb/status.proto"; import "src/shared/k8s/metadatapb/metadata.proto"; @@ -45,7 +44,6 @@ message VizierMessage { TracepointMessage tracepoint_message = 10; ConfigUpdateMessage config_update_message = 11; K8sMetadataMessage k8s_metadata_message = 12; - FileSourceMessage file_source_message = 13; } // DEPRECATED: Formerly used for UpdateAgentRequest. reserved 3; @@ -62,15 +60,6 @@ message TracepointMessage { } } -// A wrapper around all file source-related messages that can be sent over the message bus. -message FileSourceMessage { - oneof msg { - FileSourceInfoUpdate file_source_info_update = 1; - RemoveFileSourceRequest remove_file_source_request = 2; - RegisterFileSourceRequest register_file_source_request = 3; - } -} - // A wrapper around all PEM-config-related messages that can be sent over the message bus. message ConfigUpdateMessage { oneof msg { @@ -183,27 +172,6 @@ message RemoveTracepointRequest { uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; } -// The request to register file sources on a PEM. -message RegisterFileSourceRequest { - px.carnot.planner.file_source.ir.FileSourceDeployment file_source_deployment = 1; - uuidpb.UUID id = 2 [ (gogoproto.customname) = "ID" ]; -} - -// An update message sent when a file source's status changes. -message FileSourceInfoUpdate { - uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; - // The state of the file source. - px.statuspb.LifeCycleState state = 2; - // The status of the file source, specified if the state of the file source is not healthy. - px.statuspb.Status status = 3; - // The ID of the agent sending the update. - uuidpb.UUID agent_id = 4 [ (gogoproto.customname) = "AgentID" ]; -} - -message RemoveFileSourceRequest { - uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; -} - // A request to update a config setting on a PEM. message ConfigUpdateRequest { // The key of the setting that should be updated. 
diff --git a/src/vizier/services/agent/kelvin/kelvin_manager.h b/src/vizier/services/agent/kelvin/kelvin_manager.h index 2c2959736f4..51b0c2fc993 100644 --- a/src/vizier/services/agent/kelvin/kelvin_manager.h +++ b/src/vizier/services/agent/kelvin/kelvin_manager.h @@ -60,7 +60,6 @@ class KelvinManager : public Manager { static services::shared::agent::AgentCapabilities Capabilities() { services::shared::agent::AgentCapabilities capabilities; capabilities.set_collects_data(false); - capabilities.set_stores_data(true); return capabilities; } diff --git a/src/vizier/services/agent/pem/file_source_manager.cc b/src/vizier/services/agent/pem/file_source_manager.cc deleted file mode 100644 index 650ae2f85a6..00000000000 --- a/src/vizier/services/agent/pem/file_source_manager.cc +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include -#include - -#include "src/common/base/base.h" -#include "src/vizier/services/agent/pem/file_source_manager.h" - -constexpr auto kUpdateInterval = std::chrono::seconds(2); - -namespace px { -namespace vizier { -namespace agent { - -FileSourceManager::FileSourceManager(px::event::Dispatcher* dispatcher, Info* agent_info, - Manager::VizierNATSConnector* nats_conn, - stirling::Stirling* stirling, - table_store::TableStore* table_store, - RelationInfoManager* relation_info_manager) - : MessageHandler(dispatcher, agent_info, nats_conn), - dispatcher_(dispatcher), - nats_conn_(nats_conn), - stirling_(stirling), - table_store_(table_store), - relation_info_manager_(relation_info_manager) { - file_source_monitor_timer_ = - dispatcher_->CreateTimer(std::bind(&FileSourceManager::Monitor, this)); - // Kick off the background monitor. - file_source_monitor_timer_->EnableTimer(kUpdateInterval); -} - -Status FileSourceManager::HandleMessage(std::unique_ptr msg) { - // The main purpose of handle message is to update the local state based on updates - // from the MDS. 
- if (!msg->has_file_source_message()) { - return error::InvalidArgument("Can only handle file source requests"); - } - - const messages::FileSourceMessage& file_source = msg->file_source_message(); - switch (file_source.msg_case()) { - case messages::FileSourceMessage::kRegisterFileSourceRequest: { - return HandleRegisterFileSourceRequest(file_source.register_file_source_request()); - } - case messages::FileSourceMessage::kRemoveFileSourceRequest: { - return HandleRemoveFileSourceRequest(file_source.remove_file_source_request()); - } - default: - LOG(ERROR) << "Unknown message type: " << file_source.msg_case() << " skipping"; - } - return Status::OK(); -} - -std::string FileSourceManager::DebugString() const { - std::lock_guard lock(mu_); - std::stringstream ss; - auto now = std::chrono::steady_clock::now(); - ss << absl::Substitute("File Source Manager Debug State:\n"); - ss << absl::Substitute("ID\tNAME\tCURRENT_STATE\tEXPECTED_STATE\tlast_updated\n"); - for (const auto& [id, file_source] : file_sources_) { - ss << absl::Substitute( - "$0\t$1\t$2\t$3\t$4 seconds\n", id.str(), file_source.name, - statuspb::LifeCycleState_Name(file_source.current_state), - statuspb::LifeCycleState_Name(file_source.expected_state), - std::chrono::duration_cast(now - file_source.last_updated_at) - .count()); - } - return ss.str(); -} - -Status FileSourceManager::HandleRegisterFileSourceRequest( - const messages::RegisterFileSourceRequest& req) { - auto glob_pattern = req.file_source_deployment().glob_pattern(); - PX_ASSIGN_OR_RETURN(auto id, ParseUUID(req.id())); - LOG(INFO) << "Registering file source: " << glob_pattern << " uuid string=" << id.str(); - - FileSourceInfo info; - info.name = glob_pattern; - info.id = id; - info.expected_state = statuspb::RUNNING_STATE; - info.current_state = statuspb::PENDING_STATE; - info.last_updated_at = dispatcher_->GetTimeSource().MonotonicTime(); - stirling_->RegisterFileSource(id, glob_pattern); - { - std::lock_guard lock(mu_); - 
file_sources_[id] = std::move(info); - } - return Status::OK(); -} - -Status FileSourceManager::HandleRemoveFileSourceRequest( - const messages::RemoveFileSourceRequest& req) { - PX_ASSIGN_OR_RETURN(auto id, ParseUUID(req.id())); - std::lock_guard lock(mu_); - auto it = file_sources_.find(id); - if (it == file_sources_.end()) { - return error::NotFound("File source with ID: $0, not found", id.str()); - } - - it->second.expected_state = statuspb::TERMINATED_STATE; - return stirling_->RemoveFileSource(id); -} - -void FileSourceManager::Monitor() { - std::lock_guard lock(mu_); - - for (auto& [id, file_source] : file_sources_) { - auto s_or_publish = stirling_->GetFileSourceInfo(id); - statuspb::LifeCycleState current_state; - // Get the latest current state according to stirling. - if (s_or_publish.ok()) { - current_state = statuspb::RUNNING_STATE; - } else { - switch (s_or_publish.code()) { - case statuspb::FAILED_PRECONDITION: - // Means the binary has not been found. - current_state = statuspb::FAILED_STATE; - break; - case statuspb::RESOURCE_UNAVAILABLE: - current_state = statuspb::PENDING_STATE; - break; - case statuspb::NOT_FOUND: - // Means we didn't actually find the probe. If we requested termination, - // it's because the probe has been removed. - current_state = (file_source.expected_state == statuspb::TERMINATED_STATE) - ? statuspb::TERMINATED_STATE - : statuspb::UNKNOWN_STATE; - break; - default: - current_state = statuspb::FAILED_STATE; - break; - } - } - - if (current_state != statuspb::RUNNING_STATE && - file_source.expected_state == statuspb::TERMINATED_STATE) { - current_state = statuspb::TERMINATED_STATE; - } - - if (current_state == file_source.current_state) { - // No state transition, nothing to do. - continue; - } - - // The following transitions are legal: - // 1. Pending -> Terminated: Probe is stopped before starting. - // 2. Pending -> Running : Probe starts up. - // 3. Running -> Terminated: Probe is stopped. - // 4. 
Running -> Failed: Probe got dettached because binary died. - // 5. Failed -> Running: Probe started up because binary came back to life. - // - // In all cases we basically inform the MDS. - // In the cases where we transition to running, we need to update the schemas. - - Status probe_status = Status::OK(); - LOG(INFO) << absl::Substitute("File source[$0]::$1 has transitioned $2 -> $3", id.str(), - file_source.name, - statuspb::LifeCycleState_Name(file_source.current_state), - statuspb::LifeCycleState_Name(current_state)); - // Check if running now, then update the schema. - if (current_state == statuspb::RUNNING_STATE) { - // We must have just transitioned into running. We try to apply the new schema. - // If it fails we will trigger an error and report that to MDS. - auto publish_pb = s_or_publish.ConsumeValueOrDie(); - auto s = UpdateSchema(publish_pb); - if (!s.ok()) { - current_state = statuspb::FAILED_STATE; - probe_status = s; - } - } else { - probe_status = s_or_publish.status(); - } - - file_source.current_state = current_state; - - // Update MDS with the latest status. 
- px::vizier::messages::VizierMessage msg; - auto file_source_msg = msg.mutable_file_source_message(); - auto update_msg = file_source_msg->mutable_file_source_info_update(); - ToProto(agent_info()->agent_id, update_msg->mutable_agent_id()); - ToProto(id, update_msg->mutable_id()); - update_msg->set_state(file_source.current_state); - probe_status.ToProto(update_msg->mutable_status()); - VLOG(1) << "Sending file source info update message: " << msg.DebugString(); - auto s = nats_conn_->Publish(msg); - if (!s.ok()) { - LOG(ERROR) << "Failed to update nats"; - } - } - file_source_monitor_timer_->EnableTimer(kUpdateInterval); -} - -Status FileSourceManager::UpdateSchema(const stirling::stirlingpb::Publish& publish_pb) { - LOG(INFO) << "Updating schema for file source"; - auto relation_info_vec = ConvertPublishPBToRelationInfo(publish_pb); - // TODO(zasgar): Failure here can lead to an inconsistent schema state. We should - // figure out how to handle this as part of the data model refactor project. - for (const auto& relation_info : relation_info_vec) { - if (!relation_info_manager_->HasRelation(relation_info.name)) { - table_store_->AddTable( - table_store::HotColdTable::Create(relation_info.name, relation_info.relation), - relation_info.name, relation_info.id); - PX_RETURN_IF_ERROR(relation_info_manager_->AddRelationInfo(relation_info)); - } else { - if (relation_info.relation != table_store_->GetTable(relation_info.name)->GetRelation()) { - return error::Internal( - "File source is not compatible with the schema of the specified output table. 
" - "[table_name=$0]", - relation_info.name); - } - PX_RETURN_IF_ERROR(table_store_->AddTableAlias(relation_info.id, relation_info.name)); - } - } - return Status::OK(); -} - -} // namespace agent -} // namespace vizier -} // namespace px diff --git a/src/vizier/services/agent/pem/file_source_manager.h b/src/vizier/services/agent/pem/file_source_manager.h deleted file mode 100644 index f45d346f5f2..00000000000 --- a/src/vizier/services/agent/pem/file_source_manager.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include -#include - -#include - -#include "src/stirling/stirling.h" -#include "src/vizier/services/agent/shared/manager/manager.h" - -namespace px { -namespace vizier { -namespace agent { - -struct FileSourceInfo { - std::string name; - sole::uuid id; - statuspb::LifeCycleState expected_state; - statuspb::LifeCycleState current_state; - std::chrono::time_point last_updated_at; -}; - -class FileSourceManager : public Manager::MessageHandler { - public: - FileSourceManager() = delete; - FileSourceManager(px::event::Dispatcher* dispatcher, Info* agent_info, - Manager::VizierNATSConnector* nats_conn, stirling::Stirling* stirling, - table_store::TableStore* table_store, - RelationInfoManager* relation_info_manager); - - Status HandleMessage(std::unique_ptr msg) override; - std::string DebugString() const; - Status HandleRegisterFileSourceRequest(const messages::RegisterFileSourceRequest& req); - Status HandleRemoveFileSourceRequest(const messages::RemoveFileSourceRequest& req); - - private: - // The tracepoint Monitor that is responsible for watching and updating the state of - // active tracepoints. 
- void Monitor(); - Status UpdateSchema(const stirling::stirlingpb::Publish& publish_proto); - - px::event::Dispatcher* dispatcher_; - Manager::VizierNATSConnector* nats_conn_; - stirling::Stirling* stirling_; - table_store::TableStore* table_store_; - RelationInfoManager* relation_info_manager_; - - event::TimerUPtr file_source_monitor_timer_; - mutable std::mutex mu_; - absl::flat_hash_map file_sources_; -}; - -} // namespace agent -} // namespace vizier -} // namespace px diff --git a/src/vizier/services/agent/pem/pem_manager.cc b/src/vizier/services/agent/pem/pem_manager.cc index c73444b9b6c..ff9f1e0ffad 100644 --- a/src/vizier/services/agent/pem/pem_manager.cc +++ b/src/vizier/services/agent/pem/pem_manager.cc @@ -78,11 +78,6 @@ Status PEMManager::PostRegisterHookImpl() { stirling_.get(), table_store(), relation_info_manager()); PX_RETURN_IF_ERROR(RegisterMessageHandler(messages::VizierMessage::MsgCase::kTracepointMessage, tracepoint_manager_)); - file_source_manager_ = - std::make_shared(dispatcher(), info(), agent_nats_connector(), - stirling_.get(), table_store(), relation_info_manager()); - PX_RETURN_IF_ERROR(RegisterMessageHandler(messages::VizierMessage::MsgCase::kFileSourceMessage, - file_source_manager_)); return Status::OK(); } @@ -150,20 +145,20 @@ Status PEMManager::InitSchemas() { // Special case to set the max size of the http_events table differently from the other // tables. For now, the min cold batch size is set to 256kB to be consistent with previous // behaviour. 
- table_ptr = std::make_shared( - relation_info.name, relation_info.relation, http_table_size, 256 * 1024); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + http_table_size, 256 * 1024); } else if (relation_info.name == "stirling_error") { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, stirling_error_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + stirling_error_table_size); } else if (relation_info.name == "probe_status") { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, probe_status_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + probe_status_table_size); } else if (relation_info.name == "proc_exit_events") { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, proc_exit_events_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + proc_exit_events_table_size); } else { - table_ptr = std::make_shared( - relation_info.name, relation_info.relation, other_table_size); + table_ptr = std::make_shared(relation_info.name, relation_info.relation, + other_table_size); } table_store()->AddTable(std::move(table_ptr), relation_info.name, relation_info.id); diff --git a/src/vizier/services/agent/pem/pem_manager.h b/src/vizier/services/agent/pem/pem_manager.h index d9c138355d9..9dcbab9b4f9 100644 --- a/src/vizier/services/agent/pem/pem_manager.h +++ b/src/vizier/services/agent/pem/pem_manager.h @@ -28,7 +28,6 @@ #include "src/common/system/kernel_version.h" #include "src/stirling/stirling.h" -#include "src/vizier/services/agent/pem/file_source_manager.h" #include "src/vizier/services/agent/pem/tracepoint_manager.h" #include "src/vizier/services/agent/shared/manager/manager.h" @@ -105,7 +104,6 @@ class PEMManager : public Manager { std::unique_ptr stirling_; std::shared_ptr tracepoint_manager_; - std::shared_ptr file_source_manager_; // Timer for 
triggering ClockConverter polls. px::event::TimerUPtr clock_converter_timer_; diff --git a/src/vizier/services/agent/pem/tracepoint_manager.cc b/src/vizier/services/agent/pem/tracepoint_manager.cc index 65a18370bd7..3c7453c0313 100644 --- a/src/vizier/services/agent/pem/tracepoint_manager.cc +++ b/src/vizier/services/agent/pem/tracepoint_manager.cc @@ -204,7 +204,6 @@ void TracepointManager::Monitor() { ToProto(id, update_msg->mutable_id()); update_msg->set_state(tracepoint.current_state); probe_status.ToProto(update_msg->mutable_status()); - VLOG(1) << "Sending tracepoint info update message: " << msg.DebugString(); auto s = nats_conn_->Publish(msg); if (!s.ok()) { LOG(ERROR) << "Failed to update nats"; @@ -220,9 +219,8 @@ Status TracepointManager::UpdateSchema(const stirling::stirlingpb::Publish& publ // figure out how to handle this as part of the data model refactor project. for (const auto& relation_info : relation_info_vec) { if (!relation_info_manager_->HasRelation(relation_info.name)) { - table_store_->AddTable( - table_store::HotColdTable::Create(relation_info.name, relation_info.relation), - relation_info.name, relation_info.id); + table_store_->AddTable(table_store::Table::Create(relation_info.name, relation_info.relation), + relation_info.name, relation_info.id); PX_RETURN_IF_ERROR(relation_info_manager_->AddRelationInfo(relation_info)); } else { if (relation_info.relation != table_store_->GetTable(relation_info.name)->GetRelation()) { diff --git a/src/vizier/services/agent/pem/tracepoint_manager_test.cc b/src/vizier/services/agent/pem/tracepoint_manager_test.cc index 9cd85aed2af..fd54f7badb2 100644 --- a/src/vizier/services/agent/pem/tracepoint_manager_test.cc +++ b/src/vizier/services/agent/pem/tracepoint_manager_test.cc @@ -115,8 +115,8 @@ TEST_F(TracepointManagerTest, CreateTracepoint) { tracepoint->set_name("test_tracepoint"); EXPECT_CALL(stirling_, - RegisterTracepoint(tracepoint_id, ::testing::Pointee(testing::proto::EqualsProto( - 
tracepoint->DebugString())))); + RegisterTracepoint(tracepoint_id, + ::testing::Pointee(testing::proto::EqualsProto(*tracepoint)))); EXPECT_OK(tracepoint_manager_->HandleMessage(std::move(msg))); EXPECT_CALL(stirling_, GetTracepointInfo(tracepoint_id)) @@ -152,8 +152,8 @@ TEST_F(TracepointManagerTest, CreateTracepointFailed) { tracepoint->set_name("test_tracepoint"); EXPECT_CALL(stirling_, - RegisterTracepoint(tracepoint_id, ::testing::Pointee(testing::proto::EqualsProto( - tracepoint->DebugString())))); + RegisterTracepoint(tracepoint_id, + ::testing::Pointee(testing::proto::EqualsProto(*tracepoint)))); EXPECT_OK(tracepoint_manager_->HandleMessage(std::move(msg))); EXPECT_CALL(stirling_, GetTracepointInfo(tracepoint_id)) @@ -185,8 +185,8 @@ TEST_F(TracepointManagerTest, CreateTracepointPreconditionFailed) { tracepoint->set_name("test_tracepoint"); EXPECT_CALL(stirling_, - RegisterTracepoint(tracepoint_id, ::testing::Pointee(testing::proto::EqualsProto( - tracepoint->DebugString())))); + RegisterTracepoint(tracepoint_id, + ::testing::Pointee(testing::proto::EqualsProto(*tracepoint)))); EXPECT_OK(tracepoint_manager_->HandleMessage(std::move(msg))); EXPECT_CALL(stirling_, GetTracepointInfo(tracepoint_id)) diff --git a/src/vizier/services/agent/shared/manager/BUILD.bazel b/src/vizier/services/agent/shared/manager/BUILD.bazel index 2bf527935d4..7ba7ff6b8cc 100644 --- a/src/vizier/services/agent/shared/manager/BUILD.bazel +++ b/src/vizier/services/agent/shared/manager/BUILD.bazel @@ -42,7 +42,6 @@ pl_cc_library( "//src/vizier/funcs:cc_library", "//src/vizier/messages/messagespb:messages_pl_cc_proto", "//src/vizier/services/agent/shared/base:cc_library", - "//src/stirling/source_connectors/stirling_error:cc_library", "//third_party:natsc", "@com_github_arun11299_cpp_jwt//:cpp_jwt", "@com_github_cameron314_concurrentqueue//:concurrentqueue", diff --git a/src/vizier/services/agent/shared/manager/chan_cache.h b/src/vizier/services/agent/shared/manager/chan_cache.h index 
00106a2e2f4..6520a1c03a9 100644 --- a/src/vizier/services/agent/shared/manager/chan_cache.h +++ b/src/vizier/services/agent/shared/manager/chan_cache.h @@ -83,7 +83,7 @@ class ChanCache { }; // The cache of channels (grpc conns) made to other agents. - absl::flat_hash_map chan_cache_ GUARDED_BY(chan_cache_lock_); + absl::flat_hash_map chan_cache_ ABSL_GUARDED_BY(chan_cache_lock_); absl::base_internal::SpinLock chan_cache_lock_; // Connections that are alive for shorter than warm_up_period_ won't be cleared. std::chrono::nanoseconds warm_up_period_; diff --git a/src/vizier/services/agent/shared/manager/heartbeat.cc b/src/vizier/services/agent/shared/manager/heartbeat.cc index 0f0e77aeef5..4b48c5c68a6 100644 --- a/src/vizier/services/agent/shared/manager/heartbeat.cc +++ b/src/vizier/services/agent/shared/manager/heartbeat.cc @@ -100,8 +100,7 @@ Status HeartbeatMessageHandler::SendHeartbeatInternal() { auto* update_info = hb->mutable_update_info(); ConsumeAgentPIDUpdates(update_info); - auto capabilities = agent_info()->capabilities; - if ((capabilities.collects_data() || capabilities.stores_data()) && + if (agent_info()->capabilities.collects_data() && (!sent_schema_ || relation_info_manager_->has_updates())) { sent_schema_ = true; relation_info_manager_->AddSchemaToUpdateInfo(update_info); diff --git a/src/vizier/services/agent/shared/manager/heartbeat.h b/src/vizier/services/agent/shared/manager/heartbeat.h index 50361997854..ea7a88dd352 100644 --- a/src/vizier/services/agent/shared/manager/heartbeat.h +++ b/src/vizier/services/agent/shared/manager/heartbeat.h @@ -21,19 +21,12 @@ #include #include "src/vizier/services/agent/shared/manager/manager.h" -#include "src/table_store/table_store.h" -#include "src/shared/schema/utils.h" -#include "src/stirling/source_connectors/stirling_error/sink_results_table.h" -#include "src/stirling/core/pub_sub_manager.h" namespace px { namespace vizier { namespace agent { class HeartbeatMessageHandler : public 
Manager::MessageHandler { - - const std::string kSinkResultsTableName = "sink_results"; - public: HeartbeatMessageHandler() = delete; HeartbeatMessageHandler(px::event::Dispatcher* dispatcher, @@ -47,20 +40,6 @@ class HeartbeatMessageHandler : public Manager::MessageHandler { void DisableHeartbeats(); void EnableHeartbeats(); - Status CreateSinkResultsTable(table_store::TableStore* table_store) { - auto mgr = std::make_unique(stirling::kSinkResultsTable); - std::vector> mgrs; - mgrs.push_back(std::move(mgr)); - stirling::stirlingpb::Publish publish_pb; - PopulatePublishProto(&publish_pb, mgrs); - auto relation_info_vec = ConvertPublishPBToRelationInfo(publish_pb); - auto relation_info = relation_info_vec[0]; - auto table = table_store::HotColdTable::Create(relation_info.name, relation_info.relation); - table_store->AddTable(std::move(table), relation_info.name, relation_info.id); - PX_RETURN_IF_ERROR(relation_info_manager_->AddRelationInfo(relation_info)); - return Status::OK(); - } - private: void ConsumeAgentPIDUpdates(messages::AgentUpdateInfo* update_info); void ProcessPIDStartedEvent(const px::md::PIDStartedEvent& ev, diff --git a/src/vizier/services/agent/shared/manager/heartbeat_test.cc b/src/vizier/services/agent/shared/manager/heartbeat_test.cc index 666c3223f15..249a34ea1fc 100644 --- a/src/vizier/services/agent/shared/manager/heartbeat_test.cc +++ b/src/vizier/services/agent/shared/manager/heartbeat_test.cc @@ -114,10 +114,10 @@ class HeartbeatMessageHandlerTest : public ::testing::Test { // Relation info with no tabletization. Relation relation0({types::TIME64NS, types::INT64}, {"time_", "count"}); - RelationInfo relation_info0("relation0", /* id */ 0, "desc0", std::nullopt, relation0); + RelationInfo relation_info0("relation0", /* id */ 0, "desc0", relation0); // Relation info with no tabletization. 
Relation relation1({types::TIME64NS, types::FLOAT64}, {"time_", "gauge"}); - RelationInfo relation_info1("relation1", /* id */ 1, "desc1", std::nullopt, relation1); + RelationInfo relation_info1("relation1", /* id */ 1, "desc1", relation1); std::vector relation_info_vec({relation_info0, relation_info1}); // Pass relation info to the manager. relation_info_manager_ = std::make_unique(); @@ -299,7 +299,7 @@ TEST_F(HeartbeatMessageHandlerTest, HandleHeartbeatRelationUpdates) { auto s = heartbeat_handler_->HandleMessage(std::move(hb_ack)); Relation relation2({types::TIME64NS, types::FLOAT64}, {"time_", "gauge"}); - RelationInfo relation_info2("relation2", /* id */ 1, "desc2", std::nullopt, relation2); + RelationInfo relation_info2("relation2", /* id */ 1, "desc2", relation2); s = relation_info_manager_->AddRelationInfo(relation_info2); time_system_->Sleep(std::chrono::milliseconds(5 * 5000 + 1)); diff --git a/src/vizier/services/agent/shared/manager/manager.cc b/src/vizier/services/agent/shared/manager/manager.cc index 01efe60044b..004eb5ba2ea 100644 --- a/src/vizier/services/agent/shared/manager/manager.cc +++ b/src/vizier/services/agent/shared/manager/manager.cc @@ -87,13 +87,6 @@ Manager::MDTPServiceSPtr CreateMDTPStub(const std::shared_ptr& ch return std::make_shared(chan); } -Manager::MDFSServiceSPtr CreateMDFSStub(const std::shared_ptr& chan) { - if (chan == nullptr) { - return nullptr; - } - return std::make_shared(chan); -} - std::shared_ptr CreateCronScriptStub( const std::shared_ptr& chan) { if (chan == nullptr) { @@ -115,7 +108,7 @@ Manager::Manager(sole::uuid agent_id, std::string_view pod_name, std::string_vie relation_info_manager_(std::make_unique()), mds_channel_(grpc::CreateChannel(std::string(mds_url), grpc_channel_creds_)), func_context_(this, CreateMDSStub(mds_channel_), CreateMDTPStub(mds_channel_), - CreateMDFSStub(mds_channel_), CreateCronScriptStub(mds_channel_), table_store_, + CreateCronScriptStub(mds_channel_), table_store_, 
[](grpc::ClientContext* ctx) { AddServiceTokenToClientContext(ctx); }), memory_metrics_(&GetMetricsRegistry(), "agent_id", agent_id.str()) { // Register Vizier specific and carnot builtin functions. @@ -236,10 +229,6 @@ Status Manager::RegisterBackgroundHelpers() { heartbeat_handler_ = std::make_shared( dispatcher_.get(), mds_manager_.get(), relation_info_manager_.get(), &info_, agent_nats_connector_.get()); - if (info_.capabilities.stores_data()) { - LOG(INFO) << "Creating results table"; - PX_RETURN_IF_ERROR(heartbeat_handler_->CreateSinkResultsTable(table_store())); - } auto heartbeat_nack_handler = std::make_shared( dispatcher_.get(), &info_, agent_nats_connector_.get(), @@ -299,11 +288,8 @@ Status Manager::PostRegisterHook(uint32_t asid) { LOG_IF(FATAL, info_.asid != 0) << "Attempted to register existing agent with new ASID"; info_.asid = asid; - const std::string proc_pid_path = std::string("/proc/") + std::to_string(info_.pid); - PX_ASSIGN_OR_RETURN(auto start_time, system::GetPIDStartTimeTicks(proc_pid_path)); - mds_manager_ = std::make_unique( - info_.hostname, info_.asid, info_.pid, start_time, info_.pod_name, info_.agent_id, + info_.hostname, info_.asid, info_.pid, info_.pod_name, info_.agent_id, info_.capabilities.collects_data(), px::system::Config::GetInstance(), agent_metadata_filter_.get(), sole::rebuild(FLAGS_vizier_id), FLAGS_vizier_name, FLAGS_vizier_namespace, time_system_.get()); diff --git a/src/vizier/services/agent/shared/manager/manager.h b/src/vizier/services/agent/shared/manager/manager.h index af2cd912a5a..3d7a8a4f49e 100644 --- a/src/vizier/services/agent/shared/manager/manager.h +++ b/src/vizier/services/agent/shared/manager/manager.h @@ -92,8 +92,6 @@ class Manager : public BaseManager { using MDSServiceSPtr = std::shared_ptr; using MDTPService = services::metadata::MetadataTracepointService; using MDTPServiceSPtr = std::shared_ptr; - using MDFSService = services::metadata::MetadataFileSourceService; - using MDFSServiceSPtr = 
std::shared_ptr; using ResultSinkStub = px::carnotpb::ResultSinkService::StubInterface; Manager() = delete; diff --git a/src/vizier/services/agent/shared/manager/relation_info_manager.cc b/src/vizier/services/agent/shared/manager/relation_info_manager.cc index d227978224c..cb3fc51ea8b 100644 --- a/src/vizier/services/agent/shared/manager/relation_info_manager.cc +++ b/src/vizier/services/agent/shared/manager/relation_info_manager.cc @@ -54,9 +54,6 @@ void RelationInfoManager::AddSchemaToUpdateInfo(messages::AgentUpdateInfo* updat schema->set_tabletized(relation_info.tabletized); schema->set_tabletization_key(relation.GetColumnName(relation_info.tabletization_key_idx)); } - if (relation_info.mutation_id.has_value()) { - schema->set_mutation_id(relation_info.mutation_id.value()); - } for (size_t i = 0; i < relation.NumColumns(); ++i) { auto* column = schema->add_columns(); column->set_name(relation.GetColumnName(i)); diff --git a/src/vizier/services/agent/shared/manager/relation_info_manager.h b/src/vizier/services/agent/shared/manager/relation_info_manager.h index 10c05039328..f4cf1080e3d 100644 --- a/src/vizier/services/agent/shared/manager/relation_info_manager.h +++ b/src/vizier/services/agent/shared/manager/relation_info_manager.h @@ -65,7 +65,8 @@ class RelationInfoManager { private: mutable std::atomic has_updates_ = false; mutable absl::base_internal::SpinLock relation_info_map_lock_; - absl::btree_map relation_info_map_ GUARDED_BY(relation_info_map_lock_); + absl::btree_map relation_info_map_ + ABSL_GUARDED_BY(relation_info_map_lock_); }; } // namespace agent diff --git a/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc b/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc index abeb919847c..7f9a06c750c 100644 --- a/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc +++ b/src/vizier/services/agent/shared/manager/relation_info_manager_test.cc @@ -75,11 +75,11 @@ schema { 
TEST_F(RelationInfoManagerTest, test_update) { // Relation info with no tabletization. Relation relation0({types::TIME64NS, types::INT64}, {"time_", "count"}); - RelationInfo relation_info0("relation0", /* id */ 0, "desc0", std::nullopt, relation0); + RelationInfo relation_info0("relation0", /* id */ 0, "desc0", relation0); // Relation info with no tabletization. Relation relation1({types::TIME64NS, types::FLOAT64}, {"time_", "gauge"}); - RelationInfo relation_info1("relation1", /* id */ 1, "desc1", std::nullopt, relation1); + RelationInfo relation_info1("relation1", /* id */ 1, "desc1", relation1); EXPECT_OK(relation_info_manager_->AddRelationInfo(std::move(relation_info0))); EXPECT_OK(relation_info_manager_->AddRelationInfo(std::move(relation_info1))); @@ -131,12 +131,12 @@ schema { TEST_F(RelationInfoManagerTest, test_tabletization_keys) { // Relation info with no tabletization. Relation relation0({types::TIME64NS, types::INT64}, {"time_", "count"}); - RelationInfo relation_info0("relation0", /* id */ 0, "desc0", std::nullopt, relation0); + RelationInfo relation_info0("relation0", /* id */ 0, "desc0", relation0); // Relation info with a tablet key ("upid"). 
Relation relation1({types::TIME64NS, types::UINT128, types::INT64}, {"time_", "upid", "count"}); RelationInfo relation_info1("relation1", /* id */ 1, "desc1", /* tabletization_key_idx */ 1, - std::nullopt, relation1); + relation1); EXPECT_FALSE(relation_info_manager_->has_updates()); diff --git a/src/vizier/services/metadata/BUILD.bazel b/src/vizier/services/metadata/BUILD.bazel index f885bd1c777..9d52501dcd2 100644 --- a/src/vizier/services/metadata/BUILD.bazel +++ b/src/vizier/services/metadata/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//src/vizier/services/metadata/controllers", "//src/vizier/services/metadata/controllers/agent", "//src/vizier/services/metadata/controllers/cronscript", - "//src/vizier/services/metadata/controllers/file_source", "//src/vizier/services/metadata/controllers/k8smeta", "//src/vizier/services/metadata/controllers/tracepoint", "//src/vizier/services/metadata/metadataenv", diff --git a/src/vizier/services/metadata/controllers/BUILD.bazel b/src/vizier/services/metadata/controllers/BUILD.bazel index ca6fe64f35a..0fd8cc0fee5 100644 --- a/src/vizier/services/metadata/controllers/BUILD.bazel +++ b/src/vizier/services/metadata/controllers/BUILD.bazel @@ -35,7 +35,6 @@ go_library( "//src/utils", "//src/vizier/messages/messagespb:messages_pl_go_proto", "//src/vizier/services/metadata/controllers/agent", - "//src/vizier/services/metadata/controllers/file_source", "//src/vizier/services/metadata/controllers/k8smeta", "//src/vizier/services/metadata/controllers/tracepoint", "//src/vizier/services/metadata/metadataenv", @@ -79,8 +78,6 @@ pl_go_test( "//src/vizier/messages/messagespb:messages_pl_go_proto", "//src/vizier/services/metadata/controllers/agent", "//src/vizier/services/metadata/controllers/agent/mock", - "//src/vizier/services/metadata/controllers/file_source", - "//src/vizier/services/metadata/controllers/file_source/mock", "//src/vizier/services/metadata/controllers/testutils", "//src/vizier/services/metadata/controllers/tracepoint", 
"//src/vizier/services/metadata/controllers/tracepoint/mock", diff --git a/src/vizier/services/metadata/controllers/agent_topic_listener.go b/src/vizier/services/metadata/controllers/agent_topic_listener.go index 13743e6ba6f..e8b72cfa463 100644 --- a/src/vizier/services/metadata/controllers/agent_topic_listener.go +++ b/src/vizier/services/metadata/controllers/agent_topic_listener.go @@ -32,7 +32,6 @@ import ( "px.dev/pixie/src/utils" "px.dev/pixie/src/vizier/messages/messagespb" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" "px.dev/pixie/src/vizier/services/shared/agentpb" "px.dev/pixie/src/vizier/utils/messagebus" @@ -81,7 +80,6 @@ func (c *concurrentAgentMap) delete(agentID uuid.UUID) { type AgentTopicListener struct { agtMgr agent.Manager tpMgr *tracepoint.Manager - fsMgr *file_source.Manager sendMessage SendMessageFn // Map from agent ID -> the agentHandler that's responsible for handling that particular @@ -94,7 +92,6 @@ type AgentHandler struct { id uuid.UUID agtMgr agent.Manager tpMgr *tracepoint.Manager - fsMgr *file_source.Manager atl *AgentTopicListener MsgChannel chan *nats.Msg @@ -106,12 +103,11 @@ type AgentHandler struct { // NewAgentTopicListener creates a new agent topic listener. 
func NewAgentTopicListener(agtMgr agent.Manager, tpMgr *tracepoint.Manager, - fsMgr *file_source.Manager, - sendMsgFn SendMessageFn) (*AgentTopicListener, error) { + sendMsgFn SendMessageFn, +) (*AgentTopicListener, error) { atl := &AgentTopicListener{ agtMgr: agtMgr, tpMgr: tpMgr, - fsMgr: fsMgr, sendMessage: sendMsgFn, agentMap: &concurrentAgentMap{unsafeMap: make(map[uuid.UUID]*AgentHandler)}, } @@ -166,8 +162,6 @@ func (a *AgentTopicListener) HandleMessage(msg *nats.Msg) error { a.forwardAgentRegisterRequest(m.RegisterAgentRequest, msg) case *messagespb.VizierMessage_TracepointMessage: a.onAgentTracepointMessage(m.TracepointMessage) - case *messagespb.VizierMessage_FileSourceMessage: - a.onAgentFileSourceMessage(m.FileSourceMessage) default: log.WithField("message-type", reflect.TypeOf(pb.Msg).String()). Error("Unhandled message.") @@ -197,7 +191,6 @@ func (a *AgentTopicListener) createAgentHandler(agentID uuid.UUID) *AgentHandler id: agentID, agtMgr: a.agtMgr, tpMgr: a.tpMgr, - fsMgr: a.fsMgr, atl: a, MsgChannel: make(chan *nats.Msg, 10), quitCh: make(chan struct{}), @@ -299,23 +292,6 @@ func (a *AgentTopicListener) onAgentTracepointInfoUpdate(m *messagespb.Tracepoin } } -func (a *AgentTopicListener) onAgentFileSourceMessage(pbMessage *messagespb.FileSourceMessage) { - switch m := pbMessage.Msg.(type) { - case *messagespb.FileSourceMessage_FileSourceInfoUpdate: - a.onAgentFileSourceInfoUpdate(m.FileSourceInfoUpdate) - default: - log.WithField("message-type", reflect.TypeOf(pbMessage.Msg).String()). - Error("Unhandled message.") - } -} - -func (a *AgentTopicListener) onAgentFileSourceInfoUpdate(m *messagespb.FileSourceInfoUpdate) { - err := a.fsMgr.UpdateAgentFileSourceStatus(m.ID, m.AgentID, m.State, m.Status) - if err != nil { - log.WithError(err).Error("Could not update agent tracepoint status") - } -} - // Stop stops processing any agent messagespb. 
func (a *AgentTopicListener) Stop() { // Grab all the handlers in one go since calling stop will modify the map and need @@ -457,22 +433,6 @@ func (ah *AgentHandler) onAgentRegisterRequest(m *messagespb.RegisterAgentReques } } } - - // Register all file sources on new agent. - fileSources, err := ah.fsMgr.GetAllFileSources() - if err != nil { - log.WithError(err).Error("Could not get all file sources") - return - } - - for _, fs := range fileSources { - if fs.ExpectedState != statuspb.TERMINATED_STATE { - err = ah.fsMgr.RegisterFileSource(agent, utils.UUIDFromProtoOrNil(fs.ID), fs.FileSource) - if err != nil { - log.WithError(err).Error("Failed to send RegisterFileSource request") - } - } - } }() } diff --git a/src/vizier/services/metadata/controllers/agent_topic_listener_test.go b/src/vizier/services/metadata/controllers/agent_topic_listener_test.go index ad6f8369039..c71ac335204 100644 --- a/src/vizier/services/metadata/controllers/agent_topic_listener_test.go +++ b/src/vizier/services/metadata/controllers/agent_topic_listener_test.go @@ -38,8 +38,6 @@ import ( "px.dev/pixie/src/vizier/services/metadata/controllers" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" mock_agent "px.dev/pixie/src/vizier/services/metadata/controllers/agent/mock" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" - mock_file_source "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock" "px.dev/pixie/src/vizier/services/metadata/controllers/testutils" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" mock_tracepoint "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint/mock" @@ -66,12 +64,11 @@ func assertSendMessageCalledWith(t *testing.T, expTopic string, expMsg messagesp } } -func setup(t *testing.T, sendMsgFn controllers.SendMessageFn) (*controllers.AgentTopicListener, *mock_agent.MockManager, *mock_tracepoint.MockStore, *mock_file_source.MockStore, func()) { +func setup(t *testing.T, sendMsgFn 
controllers.SendMessageFn) (*controllers.AgentTopicListener, *mock_agent.MockManager, *mock_tracepoint.MockStore, func()) { ctrl := gomock.NewController(t) mockAgtMgr := mock_agent.NewMockManager(ctrl) mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) agentInfo := new(agentpb.Agent) if err := proto.UnmarshalText(testutils.UnhealthyKelvinAgentInfo, agentInfo); err != nil { @@ -85,16 +82,14 @@ func setup(t *testing.T, sendMsgFn controllers.SendMessageFn) (*controllers.Agen Return([]*agentpb.Agent{agentInfo}, nil) tracepointMgr := tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) - fsMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - atl, _ := controllers.NewAgentTopicListener(mockAgtMgr, tracepointMgr, fsMgr, sendMsgFn) + atl, _ := controllers.NewAgentTopicListener(mockAgtMgr, tracepointMgr, sendMsgFn) cleanup := func() { ctrl.Finish() tracepointMgr.Close() - fsMgr.Close() } - return atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup + return atl, mockAgtMgr, mockTracepointStore, cleanup } func TestAgentRegisterRequest(t *testing.T) { @@ -114,8 +109,8 @@ func TestAgentRegisterRequest(t *testing.T) { // Set up mock. var wg sync.WaitGroup - wg.Add(2) - atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup := setup(t, sendMsg) + wg.Add(1) + atl, mockAgtMgr, mockTracepointStore, cleanup := setup(t, sendMsg) defer cleanup() agentInfo := &agentpb.Agent{ @@ -144,14 +139,6 @@ func TestAgentRegisterRequest(t *testing.T) { return nil, nil }) - mockFileSourceStore. - EXPECT(). - GetFileSources(). - DoAndReturn(func() ([]*storepb.FileSourceInfo, error) { - wg.Done() - return nil, nil - }) - req := new(messagespb.VizierMessage) if err := proto.UnmarshalText(testutils.RegisterAgentRequestPB, req); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -200,8 +187,8 @@ func TestKelvinRegisterRequest(t *testing.T) { // Set up mock. 
var wg sync.WaitGroup - wg.Add(2) - atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup := setup(t, sendMsg) + wg.Add(1) + atl, mockAgtMgr, mockTracepointStore, cleanup := setup(t, sendMsg) defer cleanup() agentInfo := &agentpb.Agent{ @@ -230,14 +217,6 @@ func TestKelvinRegisterRequest(t *testing.T) { return nil, nil }) - mockFileSourceStore. - EXPECT(). - GetFileSources(). - DoAndReturn(func() ([]*storepb.FileSourceInfo, error) { - wg.Done() - return nil, nil - }) - req := new(messagespb.VizierMessage) if err := proto.UnmarshalText(testutils.RegisterKelvinRequestPB, req); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -283,8 +262,8 @@ func TestAgentReRegisterRequest(t *testing.T) { // Set up mock. var wg sync.WaitGroup - wg.Add(2) - atl, mockAgtMgr, mockTracepointStore, mockFileSourceStore, cleanup := setup(t, sendMsg) + wg.Add(1) + atl, mockAgtMgr, mockTracepointStore, cleanup := setup(t, sendMsg) defer cleanup() agentInfo := &agentpb.Agent{ @@ -314,14 +293,6 @@ func TestAgentReRegisterRequest(t *testing.T) { return nil, nil }) - mockFileSourceStore. - EXPECT(). - GetFileSources(). - DoAndReturn(func() ([]*storepb.FileSourceInfo, error) { - wg.Done() - return nil, nil - }) - req := new(messagespb.VizierMessage) if err := proto.UnmarshalText(testutils.ReregisterPurgedAgentRequestPB, req); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -355,7 +326,7 @@ func TestAgentReRegisterRequest(t *testing.T) { func TestAgentRegisterRequestInvalidUUID(t *testing.T) { // Set up mock. 
- atl, _, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) @@ -373,7 +344,7 @@ func TestAgentRegisterRequestInvalidUUID(t *testing.T) { func TestAgentCreateFailed(t *testing.T) { var wg sync.WaitGroup - atl, mockAgtMgr, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, mockAgtMgr, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) @@ -427,7 +398,7 @@ func TestAgentHeartbeat(t *testing.T) { // Set up mock. var wg sync.WaitGroup - atl, mockAgtMgr, _, _, cleanup := setup(t, func(topic string, b []byte) error { + atl, mockAgtMgr, _, cleanup := setup(t, func(topic string, b []byte) error { msg := messagespb.VizierMessage{} if err := proto.Unmarshal(b, &msg); err != nil { t.Fatal("Cannot Unmarshal protobuf.") @@ -503,7 +474,7 @@ func TestAgentHeartbeat_Failed(t *testing.T) { require.NoError(t, err) // Set up mock. - atl, mockAgtMgr, _, _, cleanup := setup(t, sendMsg) + atl, mockAgtMgr, _, cleanup := setup(t, sendMsg) defer cleanup() var wg sync.WaitGroup @@ -527,7 +498,7 @@ func TestAgentHeartbeat_Failed(t *testing.T) { func TestEmptyMessage(t *testing.T) { // Set up mock. - atl, _, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) reqPb, err := req.Marshal() @@ -541,7 +512,7 @@ func TestEmptyMessage(t *testing.T) { func TestUnhandledMessage(t *testing.T) { // Set up mock. - atl, _, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, _, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() req := new(messagespb.VizierMessage) @@ -559,7 +530,7 @@ func TestUnhandledMessage(t *testing.T) { func TestAgentTracepointInfoUpdate(t *testing.T) { // Set up mock. 
- atl, _, mockTracepointStore, _, cleanup := setup(t, assertSendMessageUncalled(t)) + atl, _, mockTracepointStore, cleanup := setup(t, assertSendMessageUncalled(t)) defer cleanup() agentID := uuid.Must(uuid.NewV4()) @@ -596,45 +567,6 @@ func TestAgentTracepointInfoUpdate(t *testing.T) { require.NoError(t, err) } -func TestAgentFileSourceInfoUpdate(t *testing.T) { - // Set up mock. - atl, _, _, mockFileSourceStore, cleanup := setup(t, assertSendMessageUncalled(t)) - defer cleanup() - - agentID := uuid.Must(uuid.NewV4()) - tpID := uuid.Must(uuid.NewV4()) - - mockFileSourceStore. - EXPECT(). - UpdateFileSourceState(&storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(tpID), - AgentID: utils.ProtoFromUUID(agentID), - State: statuspb.RUNNING_STATE, - }). - Return(nil) - - req := &messagespb.VizierMessage{ - Msg: &messagespb.VizierMessage_FileSourceMessage{ - FileSourceMessage: &messagespb.FileSourceMessage{ - Msg: &messagespb.FileSourceMessage_FileSourceInfoUpdate{ - FileSourceInfoUpdate: &messagespb.FileSourceInfoUpdate{ - ID: utils.ProtoFromUUID(tpID), - AgentID: utils.ProtoFromUUID(agentID), - State: statuspb.RUNNING_STATE, - }, - }, - }, - }, - } - reqPb, err := req.Marshal() - require.NoError(t, err) - - msg := nats.Msg{} - msg.Data = reqPb - err = atl.HandleMessage(&msg) - require.NoError(t, err) -} - func TestAgentStop(t *testing.T) { u, err := uuid.FromString(testutils.NewAgentUUID) require.NoError(t, err) @@ -649,7 +581,7 @@ func TestAgentStop(t *testing.T) { }) // Set up mock. - atl, _, _, _, cleanup := setup(t, sendMsg) + atl, _, _, cleanup := setup(t, sendMsg) defer cleanup() atl.StopAgent(u) diff --git a/src/vizier/services/metadata/controllers/file_source/BUILD.bazel b/src/vizier/services/metadata/controllers/file_source/BUILD.bazel deleted file mode 100644 index 933a76e91a6..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/BUILD.bazel +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018- The Pixie Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("//bazel:pl_build_system.bzl", "pl_go_test") - -go_library( - name = "file_source", - srcs = [ - "file_source.go", - "file_source_store.go", - ], - importpath = "px.dev/pixie/src/vizier/services/metadata/controllers/file_source", - visibility = ["//src/vizier:__subpackages__"], - deps = [ - "//src/api/proto/uuidpb:uuid_pl_go_proto", - "//src/carnot/planner/file_source/ir:logical_pl_go_proto", - "//src/common/base/statuspb:status_pl_go_proto", - "//src/utils", - "//src/vizier/messages/messagespb:messages_pl_go_proto", - "//src/vizier/services/metadata/storepb:store_pl_go_proto", - "//src/vizier/services/shared/agentpb:agent_pl_go_proto", - "//src/vizier/utils/datastore", - "@com_github_gofrs_uuid//:uuid", - "@com_github_gogo_protobuf//proto", - "@com_github_gogo_protobuf//types", - "@com_github_sirupsen_logrus//:logrus", - "@org_golang_google_grpc//codes", - "@org_golang_google_grpc//status", - "@org_golang_x_sync//errgroup", - ], -) - -pl_go_test( - name = "file_source_test", - srcs = [ - "file_source_store_test.go", - "file_source_test.go", - ], - embed = [":file_source"], - deps = [ - "//src/api/proto/uuidpb:uuid_pl_go_proto", - "//src/carnot/planner/file_source/ir:logical_pl_go_proto", - "//src/common/base/statuspb:status_pl_go_proto", - "//src/utils", - "//src/vizier/messages/messagespb:messages_pl_go_proto", - 
"//src/vizier/services/metadata/controllers/agent/mock", - "//src/vizier/services/metadata/controllers/file_source/mock", - "//src/vizier/services/metadata/storepb:store_pl_go_proto", - "//src/vizier/services/shared/agentpb:agent_pl_go_proto", - "//src/vizier/utils/datastore/pebbledb", - "@com_github_cockroachdb_pebble//:pebble", - "@com_github_cockroachdb_pebble//vfs", - "@com_github_gofrs_uuid//:uuid", - "@com_github_gogo_protobuf//proto", - "@com_github_gogo_protobuf//types", - "@com_github_golang_mock//gomock", - "@com_github_stretchr_testify//assert", - "@com_github_stretchr_testify//require", - ], -) diff --git a/src/vizier/services/metadata/controllers/file_source/file_source.go b/src/vizier/services/metadata/controllers/file_source/file_source.go deleted file mode 100644 index 770476d1632..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/file_source.go +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -package file_source - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/gofrs/uuid" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "px.dev/pixie/src/api/proto/uuidpb" - "px.dev/pixie/src/carnot/planner/file_source/ir" - "px.dev/pixie/src/common/base/statuspb" - "px.dev/pixie/src/utils" - "px.dev/pixie/src/vizier/messages/messagespb" - "px.dev/pixie/src/vizier/services/metadata/storepb" - "px.dev/pixie/src/vizier/services/shared/agentpb" -) - -var ( - // ErrFileSourceAlreadyExists is produced if a file_source already exists with the given name - // and does not have a matching schema. - ErrFileSourceAlreadyExists = errors.New("FileSource already exists") -) - -// agentMessenger is a controller that lets us message all agents and all active agents. -type agentMessenger interface { - MessageAgents(agentIDs []uuid.UUID, msg []byte) error - MessageActiveAgents(msg []byte) error -} - -// Store is a datastore which can store, update, and retrieve information about file_sources. -type Store interface { - UpsertFileSource(uuid.UUID, *storepb.FileSourceInfo) error - GetFileSource(uuid.UUID) (*storepb.FileSourceInfo, error) - GetFileSources() ([]*storepb.FileSourceInfo, error) - UpdateFileSourceState(*storepb.AgentFileSourceStatus) error - GetFileSourceStates(uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) - SetFileSourceWithName(string, uuid.UUID) error - GetFileSourcesWithNames([]string) ([]*uuid.UUID, error) - GetFileSourcesForIDs([]uuid.UUID) ([]*storepb.FileSourceInfo, error) - SetFileSourceTTL(uuid.UUID, time.Duration) error - DeleteFileSourceTTLs([]uuid.UUID) error - DeleteFileSource(uuid.UUID) error - DeleteFileSourcesForAgent(uuid.UUID) error - GetFileSourceTTLs() ([]uuid.UUID, []time.Time, error) -} - -// Manager manages the file_sources deployed in the cluster. 
-type Manager struct { - ts Store - agtMgr agentMessenger - - done chan struct{} - once sync.Once -} - -// NewManager creates a new file_source manager. -func NewManager(ts Store, agtMgr agentMessenger, ttlReaperDuration time.Duration) *Manager { - tm := &Manager{ - ts: ts, - agtMgr: agtMgr, - done: make(chan struct{}), - } - - go tm.watchForFileSourceExpiry(ttlReaperDuration) - return tm -} - -func (m *Manager) watchForFileSourceExpiry(ttlReaperDuration time.Duration) { - ticker := time.NewTicker(ttlReaperDuration) - defer ticker.Stop() - for { - select { - case <-m.done: - return - case <-ticker.C: - m.terminateExpiredFileSources() - } - } -} - -func (m *Manager) terminateExpiredFileSources() { - fss, err := m.ts.GetFileSources() - if err != nil { - log.WithError(err).Warn("error encountered when trying to terminating expired file_sources") - return - } - - ttlKeys, ttlVals, err := m.ts.GetFileSourceTTLs() - if err != nil { - log.WithError(err).Warn("error encountered when trying to terminating expired file_sources") - return - } - - now := time.Now() - - // Lookup for file_sources that still have an active ttl - fsActive := make(map[uuid.UUID]bool) - for i, fs := range ttlKeys { - fsActive[fs] = ttlVals[i].After(now) - } - - for _, fs := range fss { - fsID := utils.UUIDFromProtoOrNil(fs.ID) - if fsActive[fsID] { - // FileSource TTL exists and is in the future - continue - } - if fs.ExpectedState == statuspb.TERMINATED_STATE { - // FileSource is already in terminated state - continue - } - err = m.terminateFileSource(fsID) - if err != nil { - log.WithError(err).Warn("error encountered when trying to terminating expired file_sources") - } - } -} - -func (m *Manager) terminateFileSource(id uuid.UUID) error { - // Update state in datastore to terminated. 
- fs, err := m.ts.GetFileSource(id) - if err != nil { - return err - } - - if fs == nil { - return nil - } - - fs.ExpectedState = statuspb.TERMINATED_STATE - err = m.ts.UpsertFileSource(id, fs) - if err != nil { - return err - } - - // Send termination messages to PEMs. - fileSourceReq := messagespb.VizierMessage{ - Msg: &messagespb.VizierMessage_FileSourceMessage{ - FileSourceMessage: &messagespb.FileSourceMessage{ - Msg: &messagespb.FileSourceMessage_RemoveFileSourceRequest{ - RemoveFileSourceRequest: &messagespb.RemoveFileSourceRequest{ - ID: utils.ProtoFromUUID(id), - }, - }, - }, - }, - } - msg, err := fileSourceReq.Marshal() - if err != nil { - return err - } - - return m.agtMgr.MessageActiveAgents(msg) -} - -func (m *Manager) deleteFileSource(id uuid.UUID) error { - return m.ts.DeleteFileSource(id) -} - -// CreateFileSource creates and stores info about the given file source. -func (m *Manager) CreateFileSource(fileSourceName string, fileSourceDeployment *ir.FileSourceDeployment) (*uuid.UUID, error) { - // Check to see if a file source with the matching name already exists. - resp, err := m.ts.GetFileSourcesWithNames([]string{fileSourceName}) - if err != nil { - return nil, err - } - - if len(resp) != 1 { - return nil, errors.New("Could not fetch fileSource") - } - prevFileSourceID := resp[0] - - ttl, err := types.DurationFromProto(fileSourceDeployment.TTL) - if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to parse duration: %+v", err)) - } - - if prevFileSourceID != nil { // Existing file source already exists. - prevFileSource, err := m.ts.GetFileSource(*prevFileSourceID) - if err != nil { - return nil, err - } - if prevFileSource != nil && prevFileSource.ExpectedState != statuspb.TERMINATED_STATE { - // If everything is exactly the same, no need to redeploy - // - return prevFileSourceID, ErrFileSourceAlreadyExists - // If anything inside file sources has changed - // - delete old file sources, and insert new file sources. 
- - // Check if the file sources are exactly the same. - allFsSame := true - if !proto.Equal(prevFileSource.FileSource, fileSourceDeployment) { - allFsSame = false - } - - if allFsSame { - err = m.ts.SetFileSourceTTL(*prevFileSourceID, ttl) - if err != nil { - return nil, err - } - return prevFileSourceID, ErrFileSourceAlreadyExists - } - - // Something has changed, so trigger termination of the old file source. - err = m.ts.DeleteFileSourceTTLs([]uuid.UUID{*prevFileSourceID}) - if err != nil { - return nil, err - } - } - } - - fsID, err := uuid.NewV4() - if err != nil { - return nil, err - } - newFileSource := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(fsID), - Name: fileSourceName, - FileSource: fileSourceDeployment, - ExpectedState: statuspb.RUNNING_STATE, - } - err = m.ts.UpsertFileSource(fsID, newFileSource) - if err != nil { - return nil, err - } - err = m.ts.SetFileSourceTTL(fsID, ttl) - if err != nil { - return nil, err - } - err = m.ts.SetFileSourceWithName(fileSourceName, fsID) - if err != nil { - return nil, err - } - return &fsID, nil -} - -// GetAllFileSources gets all the file sources currently tracked by the metadata service. -func (m *Manager) GetAllFileSources() ([]*storepb.FileSourceInfo, error) { - return m.ts.GetFileSources() -} - -// UpdateAgentFileSourceStatus updates the file source info with the new agent file source status. -func (m *Manager) UpdateAgentFileSourceStatus(fileSourceID *uuidpb.UUID, agentID *uuidpb.UUID, state statuspb.LifeCycleState, status *statuspb.Status) error { - if state == statuspb.TERMINATED_STATE { // If all agent file source statuses are now terminated, we can finally delete the file source from the datastore. 
- tID := utils.UUIDFromProtoOrNil(fileSourceID) - states, err := m.GetFileSourceStates(tID) - if err != nil { - return err - } - allTerminated := true - for _, s := range states { - if s.State != statuspb.TERMINATED_STATE && !s.AgentID.Equal(agentID) { - allTerminated = false - break - } - } - - if allTerminated { - return m.deleteFileSource(tID) - } - } - - fileSourceState := &storepb.AgentFileSourceStatus{ - State: state, - Status: status, - ID: fileSourceID, - AgentID: agentID, - } - - return m.ts.UpdateFileSourceState(fileSourceState) -} - -// RegisterFileSource sends requests to the given agents to register the specified file source. -func (m *Manager) RegisterFileSource(agents []*agentpb.Agent, fileSourceID uuid.UUID, fileSourceDeployment *ir.FileSourceDeployment) error { - agentIDs := make([]uuid.UUID, len(agents)) - fileSourceReq := messagespb.VizierMessage{ - Msg: &messagespb.VizierMessage_FileSourceMessage{ - FileSourceMessage: &messagespb.FileSourceMessage{ - Msg: &messagespb.FileSourceMessage_RegisterFileSourceRequest{ - RegisterFileSourceRequest: &messagespb.RegisterFileSourceRequest{ - FileSourceDeployment: fileSourceDeployment, - ID: utils.ProtoFromUUID(fileSourceID), - }, - }, - }, - }, - } - msg, err := fileSourceReq.Marshal() - if err != nil { - return err - } - for i, agt := range agents { - agentIDs[i] = utils.UUIDFromProtoOrNil(agt.Info.AgentID) - } - - err = m.agtMgr.MessageAgents(agentIDs, msg) - - if err != nil { - return err - } - - return nil -} - -// GetFileSourceInfo gets the status for the file source with the given ID. -func (m *Manager) GetFileSourceInfo(fileSourceID uuid.UUID) (*storepb.FileSourceInfo, error) { - return m.ts.GetFileSource(fileSourceID) -} - -// GetFileSourceStates gets all the known agent states for the given file source. 
-func (m *Manager) GetFileSourceStates(fileSourceID uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) { - return m.ts.GetFileSourceStates(fileSourceID) -} - -// GetFileSourcesForIDs gets all the file source infos for the given ids. -func (m *Manager) GetFileSourcesForIDs(ids []uuid.UUID) ([]*storepb.FileSourceInfo, error) { - return m.ts.GetFileSourcesForIDs(ids) -} - -// RemoveFileSources starts the termination process for the file sources with the given names. -func (m *Manager) RemoveFileSources(names []string) error { - fsIDs, err := m.ts.GetFileSourcesWithNames(names) - if err != nil { - return err - } - - ids := make([]uuid.UUID, len(fsIDs)) - - for i, id := range fsIDs { - if id == nil { - return fmt.Errorf("Could not find file source for given name: %s", names[i]) - } - ids[i] = *id - } - - return m.ts.DeleteFileSourceTTLs(ids) -} - -// DeleteAgent deletes file sources on the given agent. -func (m *Manager) DeleteAgent(agentID uuid.UUID) error { - return m.ts.DeleteFileSourcesForAgent(agentID) -} - -// Close cleans up the goroutines created and renders this no longer useable. -func (m *Manager) Close() { - m.once.Do(func() { - close(m.done) - }) - m.ts = nil - m.agtMgr = nil -} diff --git a/src/vizier/services/metadata/controllers/file_source/file_source_store.go b/src/vizier/services/metadata/controllers/file_source/file_source_store.go deleted file mode 100644 index 8ad9d729a0a..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/file_source_store.go +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -package file_source - -import ( - "path" - "strings" - "time" - - "github.com/gofrs/uuid" - "github.com/gogo/protobuf/proto" - "golang.org/x/sync/errgroup" - - "px.dev/pixie/src/api/proto/uuidpb" - "px.dev/pixie/src/utils" - "px.dev/pixie/src/vizier/services/metadata/storepb" - "px.dev/pixie/src/vizier/utils/datastore" -) - -const ( - fileSourcesPrefix = "/fileSource/" - fileSourceStatesPrefix = "/fileSourceStates/" - fileSourceTTLsPrefix = "/fileSourceTTL/" - fileSourceNamesPrefix = "/fileSourceName/" -) - -// Datastore implements the FileSourceStore interface on a given Datastore. 
-type Datastore struct { - ds datastore.MultiGetterSetterDeleterCloser -} - -// NewDatastore wraps the datastore in a file source store -func NewDatastore(ds datastore.MultiGetterSetterDeleterCloser) *Datastore { - return &Datastore{ds: ds} -} - -func getFileSourceWithNameKey(fileSourceName string) string { - return path.Join(fileSourceNamesPrefix, fileSourceName) -} - -func getFileSourceKey(fileSourceID uuid.UUID) string { - return path.Join(fileSourcesPrefix, fileSourceID.String()) -} - -func getFileSourceStatesKey(fileSourceID uuid.UUID) string { - return path.Join(fileSourceStatesPrefix, fileSourceID.String()) -} - -func getFileSourceStateKey(fileSourceID uuid.UUID, agentID uuid.UUID) string { - return path.Join(fileSourceStatesPrefix, fileSourceID.String(), agentID.String()) -} - -func getFileSourceTTLKey(fileSourceID uuid.UUID) string { - return path.Join(fileSourceTTLsPrefix, fileSourceID.String()) -} - -// GetFileSourcesWithNames gets which file source is associated with the given name. -func (t *Datastore) GetFileSourcesWithNames(fileSourceNames []string) ([]*uuid.UUID, error) { - eg := errgroup.Group{} - ids := make([]*uuid.UUID, len(fileSourceNames)) - for i := 0; i < len(fileSourceNames); i++ { - i := i // Closure for goroutine - eg.Go(func() error { - val, err := t.ds.Get(getFileSourceWithNameKey(fileSourceNames[i])) - if err != nil { - return err - } - if val == nil { - return nil - } - uuidPB := &uuidpb.UUID{} - err = proto.Unmarshal(val, uuidPB) - if err != nil { - return err - } - id := utils.UUIDFromProtoOrNil(uuidPB) - ids[i] = &id - return nil - }) - } - err := eg.Wait() - if err != nil { - return nil, err - } - - return ids, nil -} - -// SetFileSourceWithName associates the file source with the given name with the one with the provided ID. 
-func (t *Datastore) SetFileSourceWithName(fileSourceName string, fileSourceID uuid.UUID) error { - fileSourceIDpb := utils.ProtoFromUUID(fileSourceID) - val, err := fileSourceIDpb.Marshal() - if err != nil { - return err - } - - return t.ds.Set(getFileSourceWithNameKey(fileSourceName), string(val)) -} - -// UpsertFileSource updates or creates a new file source entry in the store. -func (t *Datastore) UpsertFileSource(fileSourceID uuid.UUID, fileSourceInfo *storepb.FileSourceInfo) error { - val, err := fileSourceInfo.Marshal() - if err != nil { - return err - } - - return t.ds.Set(getFileSourceKey(fileSourceID), string(val)) -} - -// DeleteFileSource deletes the file source from the store. -func (t *Datastore) DeleteFileSource(fileSourceID uuid.UUID) error { - err := t.ds.DeleteAll([]string{getFileSourceKey(fileSourceID)}) - if err != nil { - return err - } - - return t.ds.DeleteWithPrefix(getFileSourceStatesKey(fileSourceID)) -} - -// GetFileSource gets the file source info from the store, if it exists. -func (t *Datastore) GetFileSource(fileSourceID uuid.UUID) (*storepb.FileSourceInfo, error) { - resp, err := t.ds.Get(getFileSourceKey(fileSourceID)) - if err != nil { - return nil, err - } - if resp == nil { - return nil, nil - } - - fileSourcePb := &storepb.FileSourceInfo{} - err = proto.Unmarshal(resp, fileSourcePb) - if err != nil { - return nil, err - } - return fileSourcePb, nil -} - -// GetFileSources gets all of the file source s in the store. -func (t *Datastore) GetFileSources() ([]*storepb.FileSourceInfo, error) { - _, vals, err := t.ds.GetWithPrefix(fileSourcesPrefix) - if err != nil { - return nil, err - } - - fileSources := make([]*storepb.FileSourceInfo, len(vals)) - for i, val := range vals { - pb := &storepb.FileSourceInfo{} - err := proto.Unmarshal(val, pb) - if err != nil { - continue - } - fileSources[i] = pb - } - return fileSources, nil -} - -// GetFileSourcesForIDs gets all of the file source s with the given it.ds. 
-func (t *Datastore) GetFileSourcesForIDs(ids []uuid.UUID) ([]*storepb.FileSourceInfo, error) { - eg := errgroup.Group{} - fileSources := make([]*storepb.FileSourceInfo, len(ids)) - for i := 0; i < len(ids); i++ { - i := i // Closure for goroutine - eg.Go(func() error { - val, err := t.ds.Get(getFileSourceKey(ids[i])) - if err != nil { - return err - } - if val == nil { - return nil - } - fs := &storepb.FileSourceInfo{} - err = proto.Unmarshal(val, fs) - if err != nil { - return err - } - fileSources[i] = fs - return nil - }) - } - - err := eg.Wait() - if err != nil { - return nil, err - } - - return fileSources, nil -} - -// UpdateFileSourceState updates the agent file source state in the store. -func (t *Datastore) UpdateFileSourceState(state *storepb.AgentFileSourceStatus) error { - val, err := state.Marshal() - if err != nil { - return err - } - - fsID := utils.UUIDFromProtoOrNil(state.ID) - - return t.ds.Set(getFileSourceStateKey(fsID, utils.UUIDFromProtoOrNil(state.AgentID)), string(val)) -} - -// GetFileSourceStates gets all the agentFileSource states for the given file source . -func (t *Datastore) GetFileSourceStates(fileSourceID uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) { - _, vals, err := t.ds.GetWithPrefix(getFileSourceStatesKey(fileSourceID)) - if err != nil { - return nil, err - } - - fileSources := make([]*storepb.AgentFileSourceStatus, len(vals)) - for i, val := range vals { - pb := &storepb.AgentFileSourceStatus{} - err := proto.Unmarshal(val, pb) - if err != nil { - continue - } - fileSources[i] = pb - } - return fileSources, nil -} - -// SetFileSourceTTL creates a key in the datastore with the given TTL. This represents the amount of time -// that the given file source should be persisted before terminating. 
-func (t *Datastore) SetFileSourceTTL(fileSourceID uuid.UUID, ttl time.Duration) error { - expiresAt := time.Now().Add(ttl) - encodedExpiry, err := expiresAt.MarshalBinary() - if err != nil { - return err - } - return t.ds.SetWithTTL(getFileSourceTTLKey(fileSourceID), string(encodedExpiry), ttl) -} - -// DeleteFileSourceTTLs deletes the key in the datastore for the given file source TTLs. -// This is done as a single transaction, so if any deletes fail, they all fail. -func (t *Datastore) DeleteFileSourceTTLs(ids []uuid.UUID) error { - keys := make([]string, len(ids)) - for i, id := range ids { - keys[i] = getFileSourceTTLKey(id) - } - - return t.ds.DeleteAll(keys) -} - -// DeleteFileSourcesForAgent deletes the file source s for a given agent. -// Note this only purges the combo file source ID+agentID keys. Said -// file source s might still be valid and deployed on other agents. -func (t *Datastore) DeleteFileSourcesForAgent(agentID uuid.UUID) error { - fss, err := t.GetFileSources() - if err != nil { - return err - } - - delKeys := make([]string, len(fss)) - for i, fs := range fss { - delKeys[i] = getFileSourceStateKey(utils.UUIDFromProtoOrNil(fs.ID), agentID) - } - - return t.ds.DeleteAll(delKeys) -} - -// GetFileSourceTTLs gets the file source s which still have existing TTLs. -func (t *Datastore) GetFileSourceTTLs() ([]uuid.UUID, []time.Time, error) { - keys, vals, err := t.ds.GetWithPrefix(fileSourceTTLsPrefix) - if err != nil { - return nil, nil, err - } - - var ids []uuid.UUID - var expirations []time.Time - - for i, k := range keys { - keyParts := strings.Split(k, "/") - if len(keyParts) != 3 { - continue - } - id, err := uuid.FromString(keyParts[2]) - if err != nil { - continue - } - var expiresAt time.Time - err = expiresAt.UnmarshalBinary(vals[i]) - if err != nil { - // This shouldn't happen for new keys, but we might have added TTLs - // in the past without a value. So just pick some time sufficiently - // in the future. 
- // This value is only used to determine what file source s are expired - // as of _NOW_ so this is "safe". - expiresAt = time.Now().Add(30 * 24 * time.Hour) - } - ids = append(ids, id) - expirations = append(expirations, expiresAt) - } - - return ids, expirations, nil -} diff --git a/src/vizier/services/metadata/controllers/file_source/file_source_store_test.go b/src/vizier/services/metadata/controllers/file_source/file_source_store_test.go deleted file mode 100644 index f43caa8271e..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/file_source_store_test.go +++ /dev/null @@ -1,364 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -package file_source - -import ( - "os" - "testing" - "time" - - "github.com/cockroachdb/pebble" - "github.com/cockroachdb/pebble/vfs" - "github.com/gofrs/uuid" - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "px.dev/pixie/src/api/proto/uuidpb" - "px.dev/pixie/src/common/base/statuspb" - "px.dev/pixie/src/utils" - "px.dev/pixie/src/vizier/services/metadata/storepb" - "px.dev/pixie/src/vizier/utils/datastore/pebbledb" -) - -func setupTest(t *testing.T) (*pebbledb.DataStore, *Datastore, func()) { - memFS := vfs.NewMem() - c, err := pebble.Open("test", &pebble.Options{ - FS: memFS, - }) - if err != nil { - t.Fatal("failed to initialize a pebbledb") - os.Exit(1) - } - - db := pebbledb.New(c, 3*time.Second) - ts := NewDatastore(db) - cleanup := func() { - err := db.Close() - if err != nil { - t.Fatal("Failed to close db") - } - } - - return db, ts, cleanup -} - -func TestFileSourceStore_UpsertFileSource(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - tpID := uuid.Must(uuid.NewV4()) - // Create file sources. - s1 := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(tpID), - } - - err := ts.UpsertFileSource(tpID, s1) - require.NoError(t, err) - - savedFileSource, err := db.Get("/fileSource/" + tpID.String()) - require.NoError(t, err) - savedFileSourcePb := &storepb.FileSourceInfo{} - err = proto.Unmarshal(savedFileSource, savedFileSourcePb) - require.NoError(t, err) - assert.Equal(t, s1, savedFileSourcePb) -} - -func TestFileSourceStore_GetFileSource(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - tpID := uuid.Must(uuid.NewV4()) - // Create file sources. 
- s1 := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(tpID), - } - s1Text, err := s1.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - err = db.Set("/fileSource/"+tpID.String(), string(s1Text)) - require.NoError(t, err) - - fileSource, err := ts.GetFileSource(tpID) - require.NoError(t, err) - assert.NotNil(t, fileSource) - - assert.Equal(t, s1.ID, fileSource.ID) -} - -func TestFileSourceStore_GetFileSources(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - // Create file sources. - s1ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c8") - s1 := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(s1ID), - } - s1Text, err := s1.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - s2ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c9") - s2 := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(s2ID), - } - s2Text, err := s2.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - err = db.Set("/fileSource/"+s1ID.String(), string(s1Text)) - require.NoError(t, err) - err = db.Set("/fileSource/"+s2ID.String(), string(s2Text)) - require.NoError(t, err) - - fileSources, err := ts.GetFileSources() - require.NoError(t, err) - assert.Equal(t, 2, len(fileSources)) - - ids := make([]string, len(fileSources)) - for i, tp := range fileSources { - ids[i] = utils.ProtoToUUIDStr(tp.ID) - } - - assert.Contains(t, ids, utils.ProtoToUUIDStr(s1.ID)) - assert.Contains(t, ids, utils.ProtoToUUIDStr(s2.ID)) -} - -func TestFileSourceStore_GetFileSourcesForIDs(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - // Create file sources. 
- s1ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c8") - s1 := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(s1ID), - } - s1Text, err := s1.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - s2ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c9") - s2 := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(s2ID), - } - s2Text, err := s2.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - s3ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c7") - - err = db.Set("/fileSource/"+s1ID.String(), string(s1Text)) - require.NoError(t, err) - err = db.Set("/fileSource/"+s2ID.String(), string(s2Text)) - require.NoError(t, err) - - fileSources, err := ts.GetFileSourcesForIDs([]uuid.UUID{s1ID, s2ID, s3ID}) - require.NoError(t, err) - assert.Equal(t, 3, len(fileSources)) - - ids := make([]string, len(fileSources)) - for i, tp := range fileSources { - if tp == nil || tp.ID == nil { - continue - } - ids[i] = utils.ProtoToUUIDStr(tp.ID) - } - - assert.Contains(t, ids, utils.ProtoToUUIDStr(s1.ID)) - assert.Contains(t, ids, utils.ProtoToUUIDStr(s2.ID)) -} - -func TestFileSourceStore_UpdateFileSourceState(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - agentID := uuid.Must(uuid.NewV4()) - tpID := uuid.Must(uuid.NewV4()) - // Create file source state - s1 := &storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(tpID), - AgentID: utils.ProtoFromUUID(agentID), - State: statuspb.RUNNING_STATE, - } - - err := ts.UpdateFileSourceState(s1) - require.NoError(t, err) - - savedFileSource, err := db.Get("/fileSourceStates/" + tpID.String() + "/" + agentID.String()) - require.NoError(t, err) - savedFileSourcePb := &storepb.AgentFileSourceStatus{} - err = proto.Unmarshal(savedFileSource, savedFileSourcePb) - require.NoError(t, err) - assert.Equal(t, s1, savedFileSourcePb) -} - -func TestFileSourceStore_GetFileSourceStates(t *testing.T) { - db, ts, cleanup := 
setupTest(t) - defer cleanup() - - tpID := uuid.Must(uuid.NewV4()) - - agentID1 := uuid.FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - agentID2 := uuid.FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c9") - - // Create file sources. - s1 := &storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(tpID), - AgentID: utils.ProtoFromUUID(agentID1), - State: statuspb.RUNNING_STATE, - } - s1Text, err := s1.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - s2 := &storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(tpID), - AgentID: utils.ProtoFromUUID(agentID2), - State: statuspb.PENDING_STATE, - } - s2Text, err := s2.Marshal() - if err != nil { - t.Fatal("Unable to marshal file source pb") - } - - err = db.Set("/fileSourceStates/"+tpID.String()+"/"+agentID1.String(), string(s1Text)) - require.NoError(t, err) - err = db.Set("/fileSourceStates/"+tpID.String()+"/"+agentID2.String(), string(s2Text)) - require.NoError(t, err) - - fileSources, err := ts.GetFileSourceStates(tpID) - require.NoError(t, err) - assert.Equal(t, 2, len(fileSources)) - - agentIDs := make([]string, len(fileSources)) - for i, tp := range fileSources { - agentIDs[i] = utils.ProtoToUUIDStr(tp.AgentID) - } - - assert.Contains(t, agentIDs, utils.ProtoToUUIDStr(s1.AgentID)) - assert.Contains(t, agentIDs, utils.ProtoToUUIDStr(s2.AgentID)) -} - -func TestFileSourceStore_SetFileSourceWithName(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - tpID := uuid.Must(uuid.NewV4()) - - err := ts.SetFileSourceWithName("test", tpID) - require.NoError(t, err) - - savedFileSource, err := db.Get("/fileSourceName/test") - require.NoError(t, err) - savedFileSourcePb := &uuidpb.UUID{} - err = proto.Unmarshal(savedFileSource, savedFileSourcePb) - require.NoError(t, err) - assert.Equal(t, tpID, utils.UUIDFromProtoOrNil(savedFileSourcePb)) -} - -func TestFileSourceStore_GetFileSourcesWithNames(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer 
cleanup() - - tpID := uuid.Must(uuid.NewV4()) - fileSourceIDpb := utils.ProtoFromUUID(tpID) - val, err := fileSourceIDpb.Marshal() - require.NoError(t, err) - - tpID2 := uuid.Must(uuid.NewV4()) - fileSourceIDpb2 := utils.ProtoFromUUID(tpID2) - val2, err := fileSourceIDpb2.Marshal() - require.NoError(t, err) - - err = db.Set("/fileSourceName/test", string(val)) - require.NoError(t, err) - err = db.Set("/fileSourceName/test2", string(val2)) - require.NoError(t, err) - - fileSources, err := ts.GetFileSourcesWithNames([]string{"test", "test2"}) - require.NoError(t, err) - assert.Equal(t, 2, len(fileSources)) - - tps := make([]string, len(fileSources)) - for i, tp := range fileSources { - tps[i] = tp.String() - } - - assert.Contains(t, tps, tpID.String()) - assert.Contains(t, tps, tpID2.String()) -} - -func TestFileSourceStore_DeleteFileSource(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - tpID := uuid.Must(uuid.NewV4()) - - err := db.Set("/fileSource/"+tpID.String(), "test") - require.NoError(t, err) - - err = ts.DeleteFileSource(tpID) - require.NoError(t, err) - - val, err := db.Get("/fileSource/" + tpID.String()) - require.NoError(t, err) - assert.Nil(t, val) -} - -func TestFileSourceStore_DeleteFileSourceTTLs(t *testing.T) { - _, ts, cleanup := setupTest(t) - defer cleanup() - - tpID := uuid.Must(uuid.NewV4()) - tpID2 := uuid.Must(uuid.NewV4()) - - err := ts.DeleteFileSourceTTLs([]uuid.UUID{tpID, tpID2}) - require.NoError(t, err) -} - -func TestFileSourceStore_GetFileSourceTTLs(t *testing.T) { - db, ts, cleanup := setupTest(t) - defer cleanup() - - // Create file sources. 
- s1ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c8") - s2ID := uuid.FromStringOrNil("8ba7b810-9dad-11d1-80b4-00c04fd430c9") - - err := db.Set("/fileSourceTTL/"+s1ID.String(), "") - require.NoError(t, err) - err = db.Set("/fileSourceTTL/"+s2ID.String(), "") - require.NoError(t, err) - err = db.Set("/fileSourceTTL/invalid", "") - require.NoError(t, err) - - fileSources, _, err := ts.GetFileSourceTTLs() - require.NoError(t, err) - assert.Equal(t, 2, len(fileSources)) - - assert.Contains(t, fileSources, s1ID) - assert.Contains(t, fileSources, s2ID) -} diff --git a/src/vizier/services/metadata/controllers/file_source/file_source_test.go b/src/vizier/services/metadata/controllers/file_source/file_source_test.go deleted file mode 100644 index f6ac693bca1..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/file_source_test.go +++ /dev/null @@ -1,528 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ - -package file_source_test - -import ( - "sync" - "testing" - "time" - - "github.com/gofrs/uuid" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "px.dev/pixie/src/carnot/planner/file_source/ir" - "px.dev/pixie/src/common/base/statuspb" - "px.dev/pixie/src/utils" - "px.dev/pixie/src/vizier/messages/messagespb" - mock_agent "px.dev/pixie/src/vizier/services/metadata/controllers/agent/mock" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" - mock_file_source "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock" - "px.dev/pixie/src/vizier/services/metadata/storepb" - "px.dev/pixie/src/vizier/services/shared/agentpb" -) - -func TestCreateFileSource(t *testing.T) { - tests := []struct { - name string - originalFileSource *ir.FileSourceDeployment - originalFileSourceState statuspb.LifeCycleState - newFileSource *ir.FileSourceDeployment - expectError bool - expectOldUpdated bool - expectTTLUpdateOnly bool - }{ - { - name: "test_file_source", - originalFileSource: nil, - newFileSource: &ir.FileSourceDeployment{ - GlobPattern: "/tmp/test", - TableName: "/tmp/test", - TTL: &types.Duration{ - Seconds: 5, - }, - }, - expectError: false, - }, - { - name: "existing file source match", - originalFileSource: &ir.FileSourceDeployment{ - GlobPattern: "/tmp/test", - TableName: "/tmp/test", - TTL: &types.Duration{ - Seconds: 5, - }, - }, - originalFileSourceState: statuspb.RUNNING_STATE, - newFileSource: &ir.FileSourceDeployment{ - GlobPattern: "/tmp/test", - TableName: "/tmp/test", - TTL: &types.Duration{ - Seconds: 5, - }, - }, - expectTTLUpdateOnly: true, - }, - { - name: "existing file source, not exactly the same (1)", - originalFileSource: &ir.FileSourceDeployment{ - GlobPattern: "/tmp/test", - TableName: "/tmp/test", - TTL: &types.Duration{ - Seconds: 5, 
- }, - }, - originalFileSourceState: statuspb.RUNNING_STATE, - newFileSource: &ir.FileSourceDeployment{ - GlobPattern: "/tmp/test.json", - TableName: "/tmp/test", - TTL: &types.Duration{ - Seconds: 5, - }, - }, - expectOldUpdated: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - // Set up mock. - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - origID := uuid.Must(uuid.NewV4()) - - if test.originalFileSource == nil { - mockFileSourceStore. - EXPECT(). - GetFileSourcesWithNames([]string{"test_file_source"}). - Return([]*uuid.UUID{nil}, nil) - } else { - mockFileSourceStore. - EXPECT(). - GetFileSourcesWithNames([]string{"test_file_source"}). - Return([]*uuid.UUID{&origID}, nil) - mockFileSourceStore. - EXPECT(). - GetFileSource(origID). - Return(&storepb.FileSourceInfo{ - ExpectedState: test.originalFileSourceState, - FileSource: test.originalFileSource, - }, nil) - } - - if test.expectTTLUpdateOnly { - mockFileSourceStore. - EXPECT(). - SetFileSourceTTL(origID, time.Second*5) - } - - if test.expectOldUpdated { - mockFileSourceStore. - EXPECT(). - DeleteFileSourceTTLs([]uuid.UUID{origID}). - Return(nil) - } - - var newID uuid.UUID - - if !test.expectError && !test.expectTTLUpdateOnly { - mockFileSourceStore. - EXPECT(). - UpsertFileSource(gomock.Any(), gomock.Any()). - DoAndReturn(func(id uuid.UUID, tpInfo *storepb.FileSourceInfo) error { - newID = id - assert.Equal(t, &storepb.FileSourceInfo{ - FileSource: test.newFileSource, - Name: "test_file_source", - ID: utils.ProtoFromUUID(id), - ExpectedState: statuspb.RUNNING_STATE, - }, tpInfo) - return nil - }) - - mockFileSourceStore. - EXPECT(). - SetFileSourceWithName("test_file_source", gomock.Any()). - DoAndReturn(func(name string, id uuid.UUID) error { - assert.Equal(t, newID, id) - return nil - }) - - mockFileSourceStore. - EXPECT(). - SetFileSourceTTL(gomock.Any(), time.Second*5). 
- DoAndReturn(func(id uuid.UUID, ttl time.Duration) error { - assert.Equal(t, newID, id) - return nil - }) - } - - mockAgtMgr := mock_agent.NewMockManager(ctrl) - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - actualFsID, err := fileSourceMgr.CreateFileSource("test_file_source", test.newFileSource) - if test.expectError || test.expectTTLUpdateOnly { - assert.Equal(t, file_source.ErrFileSourceAlreadyExists, err) - } else { - require.NoError(t, err) - assert.Equal(t, &newID, actualFsID) - } - }) - } -} - -func TestGetFileSources(t *testing.T) { - // Set up mock. - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - tID1 := uuid.Must(uuid.NewV4()) - tID2 := uuid.Must(uuid.NewV4()) - expectedFileSourceInfo := []*storepb.FileSourceInfo{ - { - ID: utils.ProtoFromUUID(tID1), - }, - { - ID: utils.ProtoFromUUID(tID2), - }, - } - - mockFileSourceStore. - EXPECT(). - GetFileSources(). - Return(expectedFileSourceInfo, nil) - - fileSources, err := fileSourceMgr.GetAllFileSources() - require.NoError(t, err) - assert.Equal(t, expectedFileSourceInfo, fileSources) -} - -func TestGetFileSourceInfo(t *testing.T) { - // Set up mock. - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - fsID1 := uuid.Must(uuid.NewV4()) - expectedFileSourceInfo := &storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(fsID1), - } - - mockFileSourceStore. - EXPECT(). - GetFileSource(fsID1). 
- Return(expectedFileSourceInfo, nil) - - fileSources, err := fileSourceMgr.GetFileSourceInfo(fsID1) - require.NoError(t, err) - assert.Equal(t, expectedFileSourceInfo, fileSources) -} - -func TestGetFileSourceStates(t *testing.T) { - // Set up mock. - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - agentUUID1 := uuid.Must(uuid.NewV4()) - tID1 := uuid.Must(uuid.NewV4()) - expectedFileSourceStatus1 := &storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(tID1), - AgentID: utils.ProtoFromUUID(agentUUID1), - State: statuspb.RUNNING_STATE, - } - - agentUUID2 := uuid.Must(uuid.NewV4()) - expectedFileSourceStatus2 := &storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(tID1), - AgentID: utils.ProtoFromUUID(agentUUID2), - State: statuspb.PENDING_STATE, - } - - mockFileSourceStore. - EXPECT(). - GetFileSourceStates(tID1). - Return([]*storepb.AgentFileSourceStatus{expectedFileSourceStatus1, expectedFileSourceStatus2}, nil) - - fileSources, err := fileSourceMgr.GetFileSourceStates(tID1) - require.NoError(t, err) - assert.Equal(t, expectedFileSourceStatus1, fileSources[0]) - assert.Equal(t, expectedFileSourceStatus2, fileSources[1]) -} - -func TestRegisterFileSource(t *testing.T) { - // Set up mock. 
- ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - agentUUID1 := uuid.Must(uuid.NewV4()) - agentUUID2 := uuid.Must(uuid.NewV4()) - upb1 := utils.ProtoFromUUID(agentUUID1) - upb2 := utils.ProtoFromUUID(agentUUID2) - mockAgents := []*agentpb.Agent{ - // Should match programUpTo5.18.0 and programFrom5.10.0To5.18.0 - { - Info: &agentpb.AgentInfo{ - AgentID: upb1, - }, - }, - { - Info: &agentpb.AgentInfo{ - AgentID: upb2, - }, - }, - } - - fileSourceID := uuid.Must(uuid.NewV4()) - fileSourceDeployment := &ir.FileSourceDeployment{} - expectedFileSourceReq := messagespb.VizierMessage{ - Msg: &messagespb.VizierMessage_FileSourceMessage{ - FileSourceMessage: &messagespb.FileSourceMessage{ - Msg: &messagespb.FileSourceMessage_RegisterFileSourceRequest{ - RegisterFileSourceRequest: &messagespb.RegisterFileSourceRequest{ - FileSourceDeployment: fileSourceDeployment, - ID: utils.ProtoFromUUID(fileSourceID), - }, - }, - }, - }, - } - // Serialize file source request proto into byte slice to compare with the actual message sent to agents. - msg1, err := expectedFileSourceReq.Marshal() - if err != nil { - t.Fatal(err) - } - - mockAgtMgr. - EXPECT(). - MessageAgents([]uuid.UUID{agentUUID1, agentUUID2}, msg1). - Return(nil) - - err = fileSourceMgr.RegisterFileSource(mockAgents, fileSourceID, fileSourceDeployment) - require.NoError(t, err) -} - -func TestUpdateAgentFileSourceStatus(t *testing.T) { - // Set up mock. 
- ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - agentUUID1 := uuid.Must(uuid.NewV4()) - fsID := uuid.Must(uuid.NewV4()) - expectedFileSourceState := &storepb.AgentFileSourceStatus{ - ID: utils.ProtoFromUUID(fsID), - AgentID: utils.ProtoFromUUID(agentUUID1), - State: statuspb.RUNNING_STATE, - } - - mockFileSourceStore. - EXPECT(). - UpdateFileSourceState(expectedFileSourceState). - Return(nil) - - err := fileSourceMgr.UpdateAgentFileSourceStatus(utils.ProtoFromUUID(fsID), utils.ProtoFromUUID(agentUUID1), statuspb.RUNNING_STATE, nil) - require.NoError(t, err) -} - -func TestUpdateAgentFileSourceStatus_Terminated(t *testing.T) { - // Set up mock. - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - agentUUID1 := uuid.Must(uuid.NewV4()) - fsID := uuid.Must(uuid.NewV4()) - agentUUID2 := uuid.Must(uuid.NewV4()) - - mockFileSourceStore. - EXPECT(). - GetFileSourceStates(fsID). - Return([]*storepb.AgentFileSourceStatus{ - {AgentID: utils.ProtoFromUUID(agentUUID1), State: statuspb.TERMINATED_STATE}, - {AgentID: utils.ProtoFromUUID(agentUUID2), State: statuspb.RUNNING_STATE}, - }, nil) - - mockFileSourceStore. - EXPECT(). - DeleteFileSource(fsID). - Return(nil) - - err := fileSourceMgr.UpdateAgentFileSourceStatus(utils.ProtoFromUUID(fsID), utils.ProtoFromUUID(agentUUID2), statuspb.TERMINATED_STATE, nil) - require.NoError(t, err) -} - -func TestTTLExpiration(t *testing.T) { - // Set up mock. 
- ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - defer fileSourceMgr.Close() - - agentUUID1 := uuid.Must(uuid.NewV4()) - fsID := uuid.Must(uuid.NewV4()) - agentUUID2 := uuid.Must(uuid.NewV4()) - - mockFileSourceStore. - EXPECT(). - GetFileSourceStates(fsID). - Return([]*storepb.AgentFileSourceStatus{ - {AgentID: utils.ProtoFromUUID(agentUUID1), State: statuspb.TERMINATED_STATE}, - {AgentID: utils.ProtoFromUUID(agentUUID2), State: statuspb.RUNNING_STATE}, - }, nil) - - mockFileSourceStore. - EXPECT(). - DeleteFileSource(fsID). - Return(nil) - - err := fileSourceMgr.UpdateAgentFileSourceStatus(utils.ProtoFromUUID(fsID), utils.ProtoFromUUID(agentUUID2), statuspb.TERMINATED_STATE, nil) - require.NoError(t, err) -} - -func TestUpdateAgentFileSourceStatus_RemoveFileSources(t *testing.T) { - // Set up mock. - ctrl := gomock.NewController(t) - defer ctrl.Finish() - mockAgtMgr := mock_agent.NewMockManager(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - - fsID1 := uuid.Must(uuid.NewV4()) - fsID2 := uuid.Must(uuid.NewV4()) - fsID3 := uuid.Must(uuid.NewV4()) - fsID4 := uuid.Must(uuid.NewV4()) - - mockFileSourceStore. - EXPECT(). - GetFileSources(). - Return([]*storepb.FileSourceInfo{ - { - ID: utils.ProtoFromUUID(fsID1), - }, - { - ID: utils.ProtoFromUUID(fsID2), - }, - { - ID: utils.ProtoFromUUID(fsID3), - }, - { - ID: utils.ProtoFromUUID(fsID4), - ExpectedState: statuspb.TERMINATED_STATE, - }, - }, nil) - - mockFileSourceStore. - EXPECT(). - GetFileSourceTTLs(). - Return([]uuid.UUID{ - fsID1, - fsID3, - fsID4, - }, []time.Time{ - time.Now().Add(1 * time.Hour), - time.Now().Add(-1 * time.Minute), - time.Now().Add(-1 * time.Hour), - }, nil) - - mockFileSourceStore. - EXPECT(). - GetFileSource(fsID2). 
- Return(&storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(fsID2), - }, nil) - - mockFileSourceStore. - EXPECT(). - GetFileSource(fsID3). - Return(&storepb.FileSourceInfo{ - ID: utils.ProtoFromUUID(fsID3), - }, nil) - - mockFileSourceStore. - EXPECT(). - UpsertFileSource(fsID2, &storepb.FileSourceInfo{ID: utils.ProtoFromUUID(fsID2), ExpectedState: statuspb.TERMINATED_STATE}). - Return(nil) - - mockFileSourceStore. - EXPECT(). - UpsertFileSource(fsID3, &storepb.FileSourceInfo{ID: utils.ProtoFromUUID(fsID3), ExpectedState: statuspb.TERMINATED_STATE}). - Return(nil) - - var wg sync.WaitGroup - wg.Add(2) - - var seenDeletions []string - msgHandler := func(msg []byte) error { - vzMsg := &messagespb.VizierMessage{} - err := proto.Unmarshal(msg, vzMsg) - require.NoError(t, err) - req := vzMsg.GetFileSourceMessage().GetRemoveFileSourceRequest() - assert.NotNil(t, req) - seenDeletions = append(seenDeletions, utils.ProtoToUUIDStr(req.ID)) - - wg.Done() - return nil - } - - mockAgtMgr. - EXPECT(). - MessageActiveAgents(gomock.Any()). - Times(2). - DoAndReturn(msgHandler) - - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 25*time.Millisecond) - defer fileSourceMgr.Close() - - wg.Wait() - assert.Contains(t, seenDeletions, fsID2.String()) - assert.Contains(t, seenDeletions, fsID3.String()) -} diff --git a/src/vizier/services/metadata/controllers/file_source/mock.go b/src/vizier/services/metadata/controllers/file_source/mock.go deleted file mode 100644 index d0ccdbec1e2..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/mock.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -package file_source - -//go:generate mockgen -source=file_source.go -destination=mock/mock_file_source.gen.go Store diff --git a/src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel b/src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel deleted file mode 100644 index fd215aac86e..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/mock/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2018- The Pixie Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 - -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "mock", - srcs = ["mock_file_source.gen.go"], - importpath = "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock", - visibility = ["//src/vizier:__subpackages__"], - deps = [ - "//src/vizier/services/metadata/storepb:store_pl_go_proto", - "@com_github_gofrs_uuid//:uuid", - "@com_github_golang_mock//gomock", - ], -) diff --git a/src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go b/src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go deleted file mode 100644 index 9ce88669a98..00000000000 --- a/src/vizier/services/metadata/controllers/file_source/mock/mock_file_source.gen.go +++ /dev/null @@ -1,277 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: file_source.go - -// Package mock_file_source is a generated GoMock package. -package mock_file_source - -import ( - reflect "reflect" - time "time" - - uuid "github.com/gofrs/uuid" - gomock "github.com/golang/mock/gomock" - storepb "px.dev/pixie/src/vizier/services/metadata/storepb" -) - -// MockagentMessenger is a mock of agentMessenger interface. -type MockagentMessenger struct { - ctrl *gomock.Controller - recorder *MockagentMessengerMockRecorder -} - -// MockagentMessengerMockRecorder is the mock recorder for MockagentMessenger. -type MockagentMessengerMockRecorder struct { - mock *MockagentMessenger -} - -// NewMockagentMessenger creates a new mock instance. -func NewMockagentMessenger(ctrl *gomock.Controller) *MockagentMessenger { - mock := &MockagentMessenger{ctrl: ctrl} - mock.recorder = &MockagentMessengerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockagentMessenger) EXPECT() *MockagentMessengerMockRecorder { - return m.recorder -} - -// MessageActiveAgents mocks base method. 
-func (m *MockagentMessenger) MessageActiveAgents(msg []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MessageActiveAgents", msg) - ret0, _ := ret[0].(error) - return ret0 -} - -// MessageActiveAgents indicates an expected call of MessageActiveAgents. -func (mr *MockagentMessengerMockRecorder) MessageActiveAgents(msg interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageActiveAgents", reflect.TypeOf((*MockagentMessenger)(nil).MessageActiveAgents), msg) -} - -// MessageAgents mocks base method. -func (m *MockagentMessenger) MessageAgents(agentIDs []uuid.UUID, msg []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MessageAgents", agentIDs, msg) - ret0, _ := ret[0].(error) - return ret0 -} - -// MessageAgents indicates an expected call of MessageAgents. -func (mr *MockagentMessengerMockRecorder) MessageAgents(agentIDs, msg interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageAgents", reflect.TypeOf((*MockagentMessenger)(nil).MessageAgents), agentIDs, msg) -} - -// MockStore is a mock of Store interface. -type MockStore struct { - ctrl *gomock.Controller - recorder *MockStoreMockRecorder -} - -// MockStoreMockRecorder is the mock recorder for MockStore. -type MockStoreMockRecorder struct { - mock *MockStore -} - -// NewMockStore creates a new mock instance. -func NewMockStore(ctrl *gomock.Controller) *MockStore { - mock := &MockStore{ctrl: ctrl} - mock.recorder = &MockStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStore) EXPECT() *MockStoreMockRecorder { - return m.recorder -} - -// DeleteFileSource mocks base method. 
-func (m *MockStore) DeleteFileSource(arg0 uuid.UUID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteFileSource", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteFileSource indicates an expected call of DeleteFileSource. -func (mr *MockStoreMockRecorder) DeleteFileSource(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSource", reflect.TypeOf((*MockStore)(nil).DeleteFileSource), arg0) -} - -// DeleteFileSourceTTLs mocks base method. -func (m *MockStore) DeleteFileSourceTTLs(arg0 []uuid.UUID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteFileSourceTTLs", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteFileSourceTTLs indicates an expected call of DeleteFileSourceTTLs. -func (mr *MockStoreMockRecorder) DeleteFileSourceTTLs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSourceTTLs", reflect.TypeOf((*MockStore)(nil).DeleteFileSourceTTLs), arg0) -} - -// DeleteFileSourcesForAgent mocks base method. -func (m *MockStore) DeleteFileSourcesForAgent(arg0 uuid.UUID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteFileSourcesForAgent", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteFileSourcesForAgent indicates an expected call of DeleteFileSourcesForAgent. -func (mr *MockStoreMockRecorder) DeleteFileSourcesForAgent(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFileSourcesForAgent", reflect.TypeOf((*MockStore)(nil).DeleteFileSourcesForAgent), arg0) -} - -// GetFileSource mocks base method. 
-func (m *MockStore) GetFileSource(arg0 uuid.UUID) (*storepb.FileSourceInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFileSource", arg0) - ret0, _ := ret[0].(*storepb.FileSourceInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFileSource indicates an expected call of GetFileSource. -func (mr *MockStoreMockRecorder) GetFileSource(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSource", reflect.TypeOf((*MockStore)(nil).GetFileSource), arg0) -} - -// GetFileSourceStates mocks base method. -func (m *MockStore) GetFileSourceStates(arg0 uuid.UUID) ([]*storepb.AgentFileSourceStatus, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFileSourceStates", arg0) - ret0, _ := ret[0].([]*storepb.AgentFileSourceStatus) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFileSourceStates indicates an expected call of GetFileSourceStates. -func (mr *MockStoreMockRecorder) GetFileSourceStates(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourceStates", reflect.TypeOf((*MockStore)(nil).GetFileSourceStates), arg0) -} - -// GetFileSourceTTLs mocks base method. -func (m *MockStore) GetFileSourceTTLs() ([]uuid.UUID, []time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFileSourceTTLs") - ret0, _ := ret[0].([]uuid.UUID) - ret1, _ := ret[1].([]time.Time) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetFileSourceTTLs indicates an expected call of GetFileSourceTTLs. -func (mr *MockStoreMockRecorder) GetFileSourceTTLs() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourceTTLs", reflect.TypeOf((*MockStore)(nil).GetFileSourceTTLs)) -} - -// GetFileSources mocks base method. 
-func (m *MockStore) GetFileSources() ([]*storepb.FileSourceInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFileSources") - ret0, _ := ret[0].([]*storepb.FileSourceInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFileSources indicates an expected call of GetFileSources. -func (mr *MockStoreMockRecorder) GetFileSources() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSources", reflect.TypeOf((*MockStore)(nil).GetFileSources)) -} - -// GetFileSourcesForIDs mocks base method. -func (m *MockStore) GetFileSourcesForIDs(arg0 []uuid.UUID) ([]*storepb.FileSourceInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFileSourcesForIDs", arg0) - ret0, _ := ret[0].([]*storepb.FileSourceInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFileSourcesForIDs indicates an expected call of GetFileSourcesForIDs. -func (mr *MockStoreMockRecorder) GetFileSourcesForIDs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourcesForIDs", reflect.TypeOf((*MockStore)(nil).GetFileSourcesForIDs), arg0) -} - -// GetFileSourcesWithNames mocks base method. -func (m *MockStore) GetFileSourcesWithNames(arg0 []string) ([]*uuid.UUID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFileSourcesWithNames", arg0) - ret0, _ := ret[0].([]*uuid.UUID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFileSourcesWithNames indicates an expected call of GetFileSourcesWithNames. -func (mr *MockStoreMockRecorder) GetFileSourcesWithNames(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFileSourcesWithNames", reflect.TypeOf((*MockStore)(nil).GetFileSourcesWithNames), arg0) -} - -// SetFileSourceTTL mocks base method. 
-func (m *MockStore) SetFileSourceTTL(arg0 uuid.UUID, arg1 time.Duration) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetFileSourceTTL", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetFileSourceTTL indicates an expected call of SetFileSourceTTL. -func (mr *MockStoreMockRecorder) SetFileSourceTTL(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFileSourceTTL", reflect.TypeOf((*MockStore)(nil).SetFileSourceTTL), arg0, arg1) -} - -// SetFileSourceWithName mocks base method. -func (m *MockStore) SetFileSourceWithName(arg0 string, arg1 uuid.UUID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetFileSourceWithName", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetFileSourceWithName indicates an expected call of SetFileSourceWithName. -func (mr *MockStoreMockRecorder) SetFileSourceWithName(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFileSourceWithName", reflect.TypeOf((*MockStore)(nil).SetFileSourceWithName), arg0, arg1) -} - -// UpdateFileSourceState mocks base method. -func (m *MockStore) UpdateFileSourceState(arg0 *storepb.AgentFileSourceStatus) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateFileSourceState", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateFileSourceState indicates an expected call of UpdateFileSourceState. -func (mr *MockStoreMockRecorder) UpdateFileSourceState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFileSourceState", reflect.TypeOf((*MockStore)(nil).UpdateFileSourceState), arg0) -} - -// UpsertFileSource mocks base method. 
-func (m *MockStore) UpsertFileSource(arg0 uuid.UUID, arg1 *storepb.FileSourceInfo) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpsertFileSource", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpsertFileSource indicates an expected call of UpsertFileSource. -func (mr *MockStoreMockRecorder) UpsertFileSource(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertFileSource", reflect.TypeOf((*MockStore)(nil).UpsertFileSource), arg0, arg1) -} diff --git a/src/vizier/services/metadata/controllers/message_bus.go b/src/vizier/services/metadata/controllers/message_bus.go index 2a2be881592..fafee905dbc 100644 --- a/src/vizier/services/metadata/controllers/message_bus.go +++ b/src/vizier/services/metadata/controllers/message_bus.go @@ -23,7 +23,6 @@ import ( log "github.com/sirupsen/logrus" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/k8smeta" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" ) @@ -53,8 +52,9 @@ type MessageBusController struct { // NewMessageBusController creates a new controller for handling NATS messages. 
func NewMessageBusController(conn *nats.Conn, agtMgr agent.Manager, - tpMgr *tracepoint.Manager, fsMgr *file_source.Manager, k8smetaHandler *k8smeta.Handler, - isLeader *bool) (*MessageBusController, error) { + tpMgr *tracepoint.Manager, k8smetaHandler *k8smeta.Handler, + isLeader *bool, +) (*MessageBusController, error) { ch := make(chan *nats.Msg, 8192) listeners := make(map[string]TopicListener) subscriptions := make([]*nats.Subscription, 0) @@ -67,7 +67,7 @@ func NewMessageBusController(conn *nats.Conn, agtMgr agent.Manager, subscriptions: subscriptions, } - err := mc.registerListeners(agtMgr, tpMgr, fsMgr, k8smetaHandler) + err := mc.registerListeners(agtMgr, tpMgr, k8smetaHandler) if err != nil { return nil, err } @@ -110,9 +110,9 @@ func (mc *MessageBusController) handleMessages() { } } -func (mc *MessageBusController) registerListeners(agtMgr agent.Manager, tpMgr *tracepoint.Manager, fsMgr *file_source.Manager, k8smetaHandler *k8smeta.Handler) error { +func (mc *MessageBusController) registerListeners(agtMgr agent.Manager, tpMgr *tracepoint.Manager, k8smetaHandler *k8smeta.Handler) error { // Register AgentTopicListener. 
- atl, err := NewAgentTopicListener(agtMgr, tpMgr, fsMgr, mc.sendMessage) + atl, err := NewAgentTopicListener(agtMgr, tpMgr, mc.sendMessage) if err != nil { return err } diff --git a/src/vizier/services/metadata/controllers/server.go b/src/vizier/services/metadata/controllers/server.go index 384ab215451..8c4a11eebe9 100644 --- a/src/vizier/services/metadata/controllers/server.go +++ b/src/vizier/services/metadata/controllers/server.go @@ -41,7 +41,6 @@ import ( "px.dev/pixie/src/table_store/schemapb" "px.dev/pixie/src/utils" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/k8smeta" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" "px.dev/pixie/src/vizier/services/metadata/metadataenv" @@ -62,7 +61,6 @@ type Server struct { pls k8smeta.PodLabelStore agtMgr agent.Manager tpMgr *tracepoint.Manager - fsMgr *file_source.Manager // The current cursor that is actively running the GetAgentsUpdate stream. Only one GetAgentsUpdate // stream should be running at a time. getAgentsCursor uuid.UUID @@ -70,14 +68,13 @@ type Server struct { } // NewServer creates GRPC handlers. 
-func NewServer(env metadataenv.MetadataEnv, ds datastore.MultiGetterSetterDeleterCloser, pls k8smeta.PodLabelStore, agtMgr agent.Manager, tpMgr *tracepoint.Manager, fsMgr *file_source.Manager) *Server { +func NewServer(env metadataenv.MetadataEnv, ds datastore.MultiGetterSetterDeleterCloser, pls k8smeta.PodLabelStore, agtMgr agent.Manager, tpMgr *tracepoint.Manager) *Server { return &Server{ env: env, ds: ds, pls: pls, agtMgr: agtMgr, tpMgr: tpMgr, - fsMgr: fsMgr, } } @@ -101,9 +98,6 @@ func convertToRelationMap(computedSchema *storepb.ComputedSchema) (*schemapb.Sch Columns: columnPbs, Desc: schema.Desc, } - if schema.MutationId != "" { - schemaPb.MutationId = schema.MutationId - } respSchemaPb.RelationMap[schema.Name] = schemaPb } @@ -127,9 +121,6 @@ func convertToSchemaInfo(computedSchema *storepb.ComputedSchema) ([]*distributed schemaPb := &schemapb.Relation{ Columns: columnPbs, } - if schema.MutationId != "" { - schemaPb.MutationId = schema.MutationId - } agentIDs, ok := computedSchema.TableNameToAgentIDs[schema.Name] if !ok { @@ -574,55 +565,6 @@ func getTracepointStateFromAgentTracepointStates(agentStates []*storepb.AgentTra return statuspb.UNKNOWN_STATE, []*statuspb.Status{} } -func getFileSourceStateFromAgentFileSourceStates(agentStates []*storepb.AgentFileSourceStatus) (statuspb.LifeCycleState, []*statuspb.Status) { - if len(agentStates) == 0 { - return statuspb.PENDING_STATE, nil - } - - numFailed := 0 - numTerminated := 0 - numPending := 0 - numRunning := 0 - statuses := make([]*statuspb.Status, 0) - - for _, s := range agentStates { - switch s.State { - case statuspb.TERMINATED_STATE: - numTerminated++ - case statuspb.FAILED_STATE: - numFailed++ - if s.Status.ErrCode != statuspb.FAILED_PRECONDITION && s.Status.ErrCode != statuspb.OK { - statuses = append(statuses, s.Status) - } - case statuspb.PENDING_STATE: - numPending++ - case statuspb.RUNNING_STATE: - numRunning++ - } - } - - if numTerminated > 0 { // If any agentFileSources are terminated, then we 
consider the tracepoint in an terminated state. - return statuspb.TERMINATED_STATE, []*statuspb.Status{} - } - - if numRunning > 0 { // If a single agentFileSource is running, then we consider the overall tracepoint as healthy. - return statuspb.RUNNING_STATE, []*statuspb.Status{} - } - - if numPending > 0 { // If no agentFileSources are running, but some are in a pending state, the tracepoint is pending. - return statuspb.PENDING_STATE, []*statuspb.Status{} - } - - if numFailed > 0 { // If there are no terminated/running/pending tracepoints, then the tracepoint is failed. - if len(statuses) == 0 { - return statuspb.FAILED_STATE, []*statuspb.Status{agentStates[0].Status} // If there are no non FAILED_PRECONDITION statuses, just use the error from the first agent. - } - return statuspb.FAILED_STATE, statuses - } - - return statuspb.UNKNOWN_STATE, []*statuspb.Status{} -} - // RemoveTracepoint is a request to evict the given tracepoint on all agents. func (s *Server) RemoveTracepoint(ctx context.Context, req *metadatapb.RemoveTracepointRequest) (*metadatapb.RemoveTracepointResponse, error) { err := s.tpMgr.RemoveTracepoints(req.Names) @@ -637,132 +579,6 @@ func (s *Server) RemoveTracepoint(ctx context.Context, req *metadatapb.RemoveTra }, nil } -// RegisterFileSource is a request to register the file sources specified in the FileSourceDeployment on all agents. -func (s *Server) RegisterFileSource(ctx context.Context, req *metadatapb.RegisterFileSourceRequest) (*metadatapb.RegisterFileSourceResponse, error) { - responses := make([]*metadatapb.RegisterFileSourceResponse_FileSourceStatus, len(req.Requests)) - - // Create file source. - for i, fs := range req.Requests { - // TODO(ddelnano): Consider adding support for filtering by labels. 
- fileSourceID, err := s.fsMgr.CreateFileSource(fs.Name, fs) - if err != nil && err != file_source.ErrFileSourceAlreadyExists { - return nil, err - } - if err == file_source.ErrFileSourceAlreadyExists { - responses[i] = &metadatapb.RegisterFileSourceResponse_FileSourceStatus{ - ID: utils.ProtoFromUUID(*fileSourceID), - Status: &statuspb.Status{ - ErrCode: statuspb.ALREADY_EXISTS, - }, - Name: fs.Name, - } - continue - } - - responses[i] = &metadatapb.RegisterFileSourceResponse_FileSourceStatus{ - ID: utils.ProtoFromUUID(*fileSourceID), - Status: &statuspb.Status{ - ErrCode: statuspb.OK, - }, - Name: fs.Name, - } - - // Get all agents currently running. - agents, err := s.agtMgr.GetActiveAgents() - if err != nil { - return nil, err - } - - err = s.fsMgr.RegisterFileSource(agents, *fileSourceID, fs) - if err != nil { - return nil, err - } - } - - resp := &metadatapb.RegisterFileSourceResponse{ - FileSources: responses, - Status: &statuspb.Status{ - ErrCode: statuspb.OK, - }, - } - - return resp, nil -} - -// GetFileSourceInfo is a request to check the status for the given file source. -func (s *Server) GetFileSourceInfo(ctx context.Context, req *metadatapb.GetFileSourceInfoRequest) (*metadatapb.GetFileSourceInfoResponse, error) { - var fileSourceInfos []*storepb.FileSourceInfo - var err error - if len(req.IDs) > 0 { - ids := make([]uuid.UUID, len(req.IDs)) - for i, id := range req.IDs { - ids[i] = utils.UUIDFromProtoOrNil(id) - } - - fileSourceInfos, err = s.fsMgr.GetFileSourcesForIDs(ids) - } else { - fileSourceInfos, err = s.fsMgr.GetAllFileSources() - } - - if err != nil { - return nil, err - } - - fileSourceState := make([]*metadatapb.GetFileSourceInfoResponse_FileSourceState, len(fileSourceInfos)) - - for i, fs := range fileSourceInfos { - if fs == nil { // FileSourceDeployment does not exist. 
- fileSourceState[i] = &metadatapb.GetFileSourceInfoResponse_FileSourceState{ - ID: req.IDs[i], - State: statuspb.UNKNOWN_STATE, - Statuses: []*statuspb.Status{{ - ErrCode: statuspb.NOT_FOUND, - }}, - } - continue - } - tUUID := utils.UUIDFromProtoOrNil(fs.ID) - - fileSourceStates, err := s.fsMgr.GetFileSourceStates(tUUID) - if err != nil { - return nil, err - } - - state, statuses := getFileSourceStateFromAgentFileSourceStates(fileSourceStates) - - // TODO(ddelnano): For now file sources only have one schema - schemas := make([]string, 1) - schemas[0] = fs.FileSource.TableName - - fileSourceState[i] = &metadatapb.GetFileSourceInfoResponse_FileSourceState{ - ID: fs.ID, - State: state, - Statuses: statuses, - Name: fs.Name, - ExpectedState: fs.ExpectedState, - SchemaNames: schemas, - } - } - - return &metadatapb.GetFileSourceInfoResponse{ - FileSources: fileSourceState, - }, nil -} - -// RemoveFileSource is a request to evict the given file sources on all agents. -func (s *Server) RemoveFileSource(ctx context.Context, req *metadatapb.RemoveFileSourceRequest) (*metadatapb.RemoveFileSourceResponse, error) { - err := s.fsMgr.RemoveFileSources(req.Names) - if err != nil { - return nil, err - } - - return &metadatapb.RemoveFileSourceResponse{ - Status: &statuspb.Status{ - ErrCode: statuspb.OK, - }, - }, nil -} - // UpdateConfig updates the config for the specified agent. 
func (s *Server) UpdateConfig(ctx context.Context, req *metadatapb.UpdateConfigRequest) (*metadatapb.UpdateConfigResponse, error) { splitName := strings.Split(req.AgentPodName, "/") diff --git a/src/vizier/services/metadata/controllers/server_test.go b/src/vizier/services/metadata/controllers/server_test.go index bfa36e4d2c9..9a9dc844c9a 100644 --- a/src/vizier/services/metadata/controllers/server_test.go +++ b/src/vizier/services/metadata/controllers/server_test.go @@ -55,8 +55,6 @@ import ( "px.dev/pixie/src/vizier/messages/messagespb" "px.dev/pixie/src/vizier/services/metadata/controllers" mock_agent "px.dev/pixie/src/vizier/services/metadata/controllers/agent/mock" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" - mock_file_source "px.dev/pixie/src/vizier/services/metadata/controllers/file_source/mock" "px.dev/pixie/src/vizier/services/metadata/controllers/testutils" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" mock_tracepoint "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint/mock" @@ -67,7 +65,7 @@ import ( ) func testTableInfos() []*storepb.TableInfo { - tableInfos := make([]*storepb.TableInfo, 3) + tableInfos := make([]*storepb.TableInfo, 2) schema1Cols := make([]*storepb.TableInfo_ColumnInfo, 3) schema1Cols[0] = &storepb.TableInfo_ColumnInfo{ @@ -102,17 +100,6 @@ func testTableInfos() []*storepb.TableInfo { Columns: schema2Cols, Desc: "table 2 desc", } - schema3Cols := make([]*storepb.TableInfo_ColumnInfo, 1) - schema3Cols[0] = &storepb.TableInfo_ColumnInfo{ - Name: "t3Col1", - DataType: 1, - } - tableInfos[2] = &storepb.TableInfo{ - Name: "table3", - Columns: schema3Cols, - Desc: "table 3 desc", - MutationId: "mutation id", - } return tableInfos } @@ -178,7 +165,7 @@ func TestGetAgentInfo(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil) req := 
metadatapb.AgentInfoRequest{} @@ -224,7 +211,7 @@ func TestGetAgentInfoGetActiveAgentsFailed(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil) req := metadatapb.AgentInfoRequest{} @@ -253,7 +240,7 @@ func TestGetSchemas(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, nil) req := metadatapb.SchemaRequest{} @@ -262,7 +249,7 @@ func TestGetSchemas(t *testing.T) { require.NoError(t, err) assert.NotNil(t, resp) - assert.Equal(t, 3, len(resp.Schema.RelationMap)) + assert.Equal(t, 2, len(resp.Schema.RelationMap)) assert.Equal(t, "table 1 desc", resp.Schema.RelationMap["table1"].Desc) assert.Equal(t, 3, len(resp.Schema.RelationMap["table1"].Columns)) assert.Equal(t, "t1Col1", resp.Schema.RelationMap["table1"].Columns[0].ColumnName) @@ -361,7 +348,7 @@ func Test_Server_RegisterTracepoint(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) reqs := []*metadatapb.RegisterTracepointRequest_TracepointRequest{ { @@ -486,7 +473,7 @@ func Test_Server_RegisterTracepoint_Exists(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, nil) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) reqs := []*metadatapb.RegisterTracepointRequest_TracepointRequest{ { @@ -626,10 +613,8 @@ func Test_Server_GetTracepointInfo(t *testing.T) { defer ctrl.Finish() mockAgtMgr := mock_agent.NewMockManager(ctrl) mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) tracepointMgr := 
tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) program := &logicalpb.TracepointDeployment{ Programs: []*logicalpb.TracepointDeployment_TracepointProgram{ @@ -673,7 +658,7 @@ func Test_Server_GetTracepointInfo(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, fileSourceMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) req := metadatapb.GetTracepointInfoRequest{ IDs: []*uuidpb.UUID{utils.ProtoFromUUID(tID)}, } @@ -707,10 +692,8 @@ func Test_Server_RemoveTracepoint(t *testing.T) { defer ctrl.Finish() mockAgtMgr := mock_agent.NewMockManager(ctrl) mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) tracepointMgr := tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) - fileSourceMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) tpID1 := uuid.Must(uuid.NewV4()) tpID2 := uuid.Must(uuid.NewV4()) @@ -733,7 +716,7 @@ func Test_Server_RemoveTracepoint(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, fileSourceMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) req := metadatapb.RemoveTracepointRequest{ Names: []string{"test1", "test2"}, @@ -848,9 +831,6 @@ func TestGetAgentUpdates(t *testing.T) { "table2": { AgentID: []*uuidpb.UUID{u1pb}, }, - "table3": { - AgentID: []*uuidpb.UUID{u1pb, u2pb}, - }, }, } @@ -922,7 +902,7 @@ func TestGetAgentUpdates(t *testing.T) { t.Fatal("Failed to create api environment.") } - srv := controllers.NewServer(mdEnv, nil, nil, mockAgtMgr, nil, nil) + srv := controllers.NewServer(mdEnv, nil, nil, mockAgtMgr, nil) env := env.New("withpixie.ai") s := server.CreateGRPCServer(env, &server.GRPCServerOptions{}) @@ 
-1032,7 +1012,7 @@ func TestGetAgentUpdates(t *testing.T) { assert.Equal(t, 1, len(r1.AgentUpdates)) assert.Equal(t, updates1[2], r1.AgentUpdates[0]) // Check schemas - assert.Equal(t, 3, len(r1.AgentSchemas)) + assert.Equal(t, 2, len(r1.AgentSchemas)) assert.Equal(t, "table1", r1.AgentSchemas[0].Name) assert.Equal(t, 3, len(r1.AgentSchemas[0].Relation.Columns)) assert.Equal(t, 2, len(r1.AgentSchemas[0].AgentList)) @@ -1042,12 +1022,6 @@ func TestGetAgentUpdates(t *testing.T) { assert.Equal(t, 2, len(r1.AgentSchemas[1].Relation.Columns)) assert.Equal(t, 1, len(r1.AgentSchemas[1].AgentList)) assert.Equal(t, u1pb, r1.AgentSchemas[1].AgentList[0]) - assert.Equal(t, "table3", r1.AgentSchemas[2].Name) - assert.Equal(t, 1, len(r1.AgentSchemas[2].Relation.Columns)) - assert.Equal(t, 2, len(r1.AgentSchemas[2].AgentList)) - assert.Equal(t, u1pb, r1.AgentSchemas[2].AgentList[0]) - assert.Equal(t, u2pb, r1.AgentSchemas[2].AgentList[1]) - assert.Equal(t, "mutation id", r1.AgentSchemas[2].Relation.MutationId) // Check empty message r2 := resps[2] @@ -1078,9 +1052,6 @@ func Test_Server_UpdateConfig(t *testing.T) { mockTracepointStore := mock_tracepoint.NewMockStore(ctrl) tracepointMgr := tracepoint.NewManager(mockTracepointStore, mockAgtMgr, 5*time.Second) - mockFileSourceStore := mock_file_source.NewMockStore(ctrl) - fsMgr := file_source.NewManager(mockFileSourceStore, mockAgtMgr, 5*time.Second) - mockAgtMgr. EXPECT(). UpdateConfig("pl", "pem-1234", "gprof", "true"). 
@@ -1092,7 +1063,7 @@ func Test_Server_UpdateConfig(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr, fsMgr) + s := controllers.NewServer(env, nil, nil, mockAgtMgr, tracepointMgr) req := metadatapb.UpdateConfigRequest{ AgentPodName: "pl/pem-1234", @@ -1133,7 +1104,7 @@ func Test_Server_ConvertLabelsToPods(t *testing.T) { t.Fatal("Failed to create api environment.") } - s := controllers.NewServer(env, nil, pls, nil, nil, nil) + s := controllers.NewServer(env, nil, pls, nil, nil) program := &logicalpb.TracepointDeployment{} err = proto.UnmarshalText(testutils.TDLabelSelectorPb, program) diff --git a/src/vizier/services/metadata/local/BUILD.bazel b/src/vizier/services/metadata/local/BUILD.bazel deleted file mode 100644 index 1f2ae16792f..00000000000 --- a/src/vizier/services/metadata/local/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2018- The Pixie Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 - -load("//bazel:pl_build_system.bzl", "pl_cc_library") - -package(default_visibility = [ - "//src/carnot:__subpackages__", - "//src/experimental:__subpackages__", - "//src/vizier:__subpackages__", -]) - -pl_cc_library( - name = "cc_library", - hdrs = ["local_metadata_service.h"], - deps = [ - "//src/table_store:cc_library", - "//src/vizier/services/metadata/metadatapb:service_pl_cc_proto", - "@com_github_grpc_grpc//:grpc++", - ], -) diff --git a/src/vizier/services/metadata/local/local_metadata_service.h b/src/vizier/services/metadata/local/local_metadata_service.h deleted file mode 100644 index e1ac86ffdda..00000000000 --- a/src/vizier/services/metadata/local/local_metadata_service.h +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright 2018- The Pixie Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#pragma once - -#include -#include -#include -#include - -#include "src/common/base/base.h" -#include "src/table_store/table_store.h" -#include "src/vizier/services/metadata/metadatapb/service.grpc.pb.h" -#include "src/vizier/services/metadata/metadatapb/service.pb.h" - -namespace px { -namespace vizier { -namespace services { -namespace metadata { - -/** - * LocalMetadataServiceImpl implements a local stub for the MetadataService. - * Only GetSchemas is implemented - it reads from the table store. - * All other methods return UNIMPLEMENTED status. 
- * - * This is useful for testing and local execution environments where - * a full metadata service is not available. - */ -class LocalMetadataServiceImpl final : public MetadataService::Service { - public: - LocalMetadataServiceImpl() = delete; - explicit LocalMetadataServiceImpl(table_store::TableStore* table_store) - : table_store_(table_store) {} - - ::grpc::Status GetSchemas(::grpc::ServerContext*, const SchemaRequest*, - SchemaResponse* response) override { - - // Get all table IDs from the table store - auto table_ids = table_store_->GetTableIDs(); - - // Build the schema response - auto* schema = response->mutable_schema(); - - for (const auto& table_id : table_ids) { - // Get the table name - std::string table_name = table_store_->GetTableName(table_id); - if (table_name.empty()) { - LOG(WARNING) << "Failed to get table name for ID: " << table_id; - continue; - } - - // Get the table object - auto* table = table_store_->GetTable(table_id); - if (table == nullptr) { - LOG(WARNING) << "Failed to get table for ID: " << table_id; - continue; - } - - // Get the relation from the table - auto relation = table->GetRelation(); - - // Add to the relation map in the schema - // The map value is a Relation proto directly - auto& rel_proto = (*schema->mutable_relation_map())[table_name]; - - // Add columns to the relation - for (size_t i = 0; i < relation.NumColumns(); ++i) { - auto* col = rel_proto.add_columns(); - col->set_column_name(relation.GetColumnName(i)); - col->set_column_type(relation.GetColumnType(i)); - col->set_column_desc(""); // No description available from table store - col->set_pattern_type(types::PatternType::GENERAL); - } - - // Set table description (empty for now) - rel_proto.set_desc(""); - } - - return ::grpc::Status::OK; - } - - ::grpc::Status GetAgentUpdates(::grpc::ServerContext*, const AgentUpdatesRequest*, - ::grpc::ServerWriter*) override { - return ::grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "GetAgentUpdates not implemented"); - } 
- - ::grpc::Status GetAgentInfo(::grpc::ServerContext*, const AgentInfoRequest*, - AgentInfoResponse* response) override { - - // Create a single agent metadata entry for local testing - auto* agent_metadata = response->add_info(); - - // Set up Agent information - auto* agent = agent_metadata->mutable_agent(); - auto* agent_info = agent->mutable_info(); - - // Generate a fixed UUID for the agent (using a realistic looking UUID) - // UUID: 12345678-1234-1234-1234-123456789abc - auto* agent_id = agent_info->mutable_agent_id(); - agent_id->set_high_bits(0x1234567812341234); - agent_id->set_low_bits(0x1234123456789abc); - - // Set up host information - auto* host_info = agent_info->mutable_host_info(); - host_info->set_hostname("local-test-host"); - host_info->set_pod_name("local-pem-pod"); - host_info->set_host_ip("127.0.0.1"); - - // Set kernel version (example: 5.15.0) - auto* kernel = host_info->mutable_kernel(); - kernel->set_version(5); - kernel->set_major_rev(15); - kernel->set_minor_rev(0); - host_info->set_kernel_headers_installed(true); - - // Set agent capabilities and parameters - agent_info->set_ip_address("127.0.0.1"); - auto* capabilities = agent_info->mutable_capabilities(); - capabilities->set_collects_data(true); - - auto* parameters = agent_info->mutable_parameters(); - parameters->set_profiler_stack_trace_sample_period_ms(100); - - // Set agent timestamps and ASID - auto current_time_ns = std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); - agent->set_create_time_ns(current_time_ns); - agent->set_last_heartbeat_ns(current_time_ns); - agent->set_asid(0); - - // Set up AgentStatus - auto* status = agent_metadata->mutable_status(); - status->set_ns_since_last_heartbeat(0); - status->set_state( - px::vizier::services::shared::agent::AgentState::AGENT_STATE_HEALTHY); - - // Set up CarnotInfo - auto* carnot_info = agent_metadata->mutable_carnot_info(); - carnot_info->set_query_broker_address("local-pem:50300"); 
- auto* carnot_agent_id = carnot_info->mutable_agent_id(); - carnot_agent_id->set_high_bits(0x1234567812341234); - carnot_agent_id->set_low_bits(0x1234123456789abc); - carnot_info->set_has_grpc_server(true); - carnot_info->set_grpc_address("local-pem:50300"); - carnot_info->set_has_data_store(true); - carnot_info->set_processes_data(true); - carnot_info->set_accepts_remote_sources(false); - carnot_info->set_asid(0); - - return ::grpc::Status::OK; - } - - ::grpc::Status GetWithPrefixKey(::grpc::ServerContext*, const WithPrefixKeyRequest*, - WithPrefixKeyResponse*) override { - return ::grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "GetWithPrefixKey not implemented"); - } - - private: - table_store::TableStore* table_store_; -}; - -/** - * LocalMetadataGRPCServer wraps the LocalMetadataServiceImpl and provides a gRPC server. - * Uses in-process communication for efficiency. - */ -class LocalMetadataGRPCServer { - public: - LocalMetadataGRPCServer() = delete; - explicit LocalMetadataGRPCServer(table_store::TableStore* table_store) - : metadata_service_(std::make_unique(table_store)) { - grpc::ServerBuilder builder; - - // Use in-process communication - builder.RegisterService(metadata_service_.get()); - - grpc_server_ = builder.BuildAndStart(); - CHECK(grpc_server_ != nullptr); - - LOG(INFO) << "Starting Local Metadata service (in-process)"; - } - - void Stop() { - if (grpc_server_) { - grpc_server_->Shutdown(); - } - grpc_server_.reset(nullptr); - } - - ~LocalMetadataGRPCServer() { Stop(); } - - std::shared_ptr StubGenerator() const { - grpc::ChannelArguments args; - // NewStub returns unique_ptr, convert to shared_ptr - return std::shared_ptr( - MetadataService::NewStub(grpc_server_->InProcessChannel(args))); - } - - private: - std::unique_ptr grpc_server_; - std::unique_ptr metadata_service_; -}; - -} // namespace metadata -} // namespace services -} // namespace vizier -} // namespace px diff --git a/src/vizier/services/metadata/metadata_server.go 
b/src/vizier/services/metadata/metadata_server.go index 791de70340f..3533ba17f9b 100644 --- a/src/vizier/services/metadata/metadata_server.go +++ b/src/vizier/services/metadata/metadata_server.go @@ -49,7 +49,6 @@ import ( "px.dev/pixie/src/vizier/services/metadata/controllers" "px.dev/pixie/src/vizier/services/metadata/controllers/agent" "px.dev/pixie/src/vizier/services/metadata/controllers/cronscript" - "px.dev/pixie/src/vizier/services/metadata/controllers/file_source" "px.dev/pixie/src/vizier/services/metadata/controllers/k8smeta" "px.dev/pixie/src/vizier/services/metadata/controllers/tracepoint" "px.dev/pixie/src/vizier/services/metadata/metadataenv" @@ -272,12 +271,7 @@ func main() { tracepointMgr := tracepoint.NewManager(tds, agtMgr, 30*time.Second) defer tracepointMgr.Close() - fds := file_source.NewDatastore(dataStore) - // Initialize file source handler. - fsMgr := file_source.NewManager(fds, agtMgr, 30*time.Second) - defer fsMgr.Close() - - mc, err := controllers.NewMessageBusController(nc, agtMgr, tracepointMgr, fsMgr, + mc, err := controllers.NewMessageBusController(nc, agtMgr, tracepointMgr, mdh, &isLeader) if err != nil { log.WithError(err).Fatal("Failed to connect to message bus") @@ -295,7 +289,7 @@ func main() { healthz.RegisterDefaultChecks(mux) metrics.MustRegisterMetricsHandlerNoDefaultMetrics(mux) - svr := controllers.NewServer(env, dataStore, k8sMds, agtMgr, tracepointMgr, fsMgr) + svr := controllers.NewServer(env, dataStore, k8sMds, agtMgr, tracepointMgr) csDs := cronscript.NewDatastore(dataStore) cronScriptSvr := cronscript.New(csDs) @@ -310,7 +304,6 @@ func main() { httpmiddleware.WithBearerAuthMiddleware(env, mux), maxMsgSize) metadatapb.RegisterMetadataServiceServer(s.GRPCServer(), svr) metadatapb.RegisterMetadataTracepointServiceServer(s.GRPCServer(), svr) - metadatapb.RegisterMetadataFileSourceServiceServer(s.GRPCServer(), svr) metadatapb.RegisterMetadataConfigServiceServer(s.GRPCServer(), svr) 
metadatapb.RegisterCronScriptStoreServiceServer(s.GRPCServer(), cronScriptSvr) diff --git a/src/vizier/services/metadata/metadatapb/BUILD.bazel b/src/vizier/services/metadata/metadatapb/BUILD.bazel index 153d3a5fe09..11b8b4962db 100644 --- a/src/vizier/services/metadata/metadatapb/BUILD.bazel +++ b/src/vizier/services/metadata/metadatapb/BUILD.bazel @@ -19,16 +19,11 @@ load("//bazel:proto_compile.bzl", "pl_cc_proto_library", "pl_go_proto_library", pl_proto_library( name = "service_pl_proto", srcs = ["service.proto"], - visibility = [ - "//src/carnot:__subpackages__", - "//src/experimental:__subpackages__", - "//src/vizier:__subpackages__", - ], + visibility = ["//src/vizier:__subpackages__"], deps = [ "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", - "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/cvmsgspb:cvmsgs_pl_proto", "//src/shared/types/typespb:types_pl_proto", @@ -42,16 +37,11 @@ pl_proto_library( pl_cc_proto_library( name = "service_pl_cc_proto", proto = ":service_pl_proto", - visibility = [ - "//src/carnot:__subpackages__", - "//src/experimental:__subpackages__", - "//src/vizier:__subpackages__", - ], + visibility = ["//src/vizier:__subpackages__"], deps = [ "//src/api/proto/uuidpb:uuid_pl_cc_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", - "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/cvmsgspb:cvmsgs_pl_cc_proto", "//src/shared/types/typespb/wrapper:cc_library", @@ -71,7 +61,6 @@ pl_go_proto_library( "//src/api/proto/uuidpb:uuid_pl_go_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", - 
"//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/cvmsgspb:cvmsgs_pl_go_proto", "//src/shared/types/typespb:types_pl_go_proto", diff --git a/src/vizier/services/metadata/metadatapb/service.pb.go b/src/vizier/services/metadata/metadatapb/service.pb.go index 52f764c4892..64e34931455 100755 --- a/src/vizier/services/metadata/metadatapb/service.pb.go +++ b/src/vizier/services/metadata/metadatapb/service.pb.go @@ -20,7 +20,6 @@ import ( uuidpb "px.dev/pixie/src/api/proto/uuidpb" distributedpb "px.dev/pixie/src/carnot/planner/distributedpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" - ir "px.dev/pixie/src/carnot/planner/file_source/ir" statuspb "px.dev/pixie/src/common/base/statuspb" cvmsgspb "px.dev/pixie/src/shared/cvmsgspb" schemapb "px.dev/pixie/src/table_store/schemapb" @@ -625,21 +624,21 @@ func (m *WithPrefixKeyResponse_KV) GetValue() []byte { return nil } -type RegisterFileSourceRequest struct { - Requests []*ir.FileSourceDeployment `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +type RegisterTracepointRequest struct { + Requests []*RegisterTracepointRequest_TracepointRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } -func (m *RegisterFileSourceRequest) Reset() { *m = RegisterFileSourceRequest{} } -func (*RegisterFileSourceRequest) ProtoMessage() {} -func (*RegisterFileSourceRequest) Descriptor() ([]byte, []int) { +func (m *RegisterTracepointRequest) Reset() { *m = RegisterTracepointRequest{} } +func (*RegisterTracepointRequest) ProtoMessage() {} +func (*RegisterTracepointRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{10} } -func (m *RegisterFileSourceRequest) XXX_Unmarshal(b []byte) error { +func (m *RegisterTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { +func (m *RegisterTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterFileSourceRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -649,41 +648,102 @@ func (m *RegisterFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *RegisterFileSourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterFileSourceRequest.Merge(m, src) +func (m *RegisterTracepointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointRequest.Merge(m, src) } -func (m *RegisterFileSourceRequest) XXX_Size() int { +func (m *RegisterTracepointRequest) XXX_Size() int { return m.Size() } -func (m *RegisterFileSourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterFileSourceRequest.DiscardUnknown(m) +func (m *RegisterTracepointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointRequest.DiscardUnknown(m) } -var xxx_messageInfo_RegisterFileSourceRequest proto.InternalMessageInfo +var xxx_messageInfo_RegisterTracepointRequest proto.InternalMessageInfo -func (m *RegisterFileSourceRequest) GetRequests() []*ir.FileSourceDeployment { +func (m *RegisterTracepointRequest) GetRequests() []*RegisterTracepointRequest_TracepointRequest { if m != nil { return m.Requests } return nil } -type RegisterFileSourceResponse struct { - FileSources []*RegisterFileSourceResponse_FileSourceStatus `protobuf:"bytes,1,rep,name=file_sources,json=fileSources,proto3" json:"file_sources,omitempty"` +type RegisterTracepointRequest_TracepointRequest struct { + TracepointDeployment *logicalpb.TracepointDeployment `protobuf:"bytes,1,opt,name=tracepoint_deployment,json=tracepointDeployment,proto3" json:"tracepoint_deployment,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + TTL 
*types.Duration `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (m *RegisterTracepointRequest_TracepointRequest) Reset() { + *m = RegisterTracepointRequest_TracepointRequest{} +} +func (*RegisterTracepointRequest_TracepointRequest) ProtoMessage() {} +func (*RegisterTracepointRequest_TracepointRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{10, 0} +} +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Merge(m, src) +} +func (m *RegisterTracepointRequest_TracepointRequest) XXX_Size() int { + return m.Size() +} +func (m *RegisterTracepointRequest_TracepointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RegisterTracepointRequest_TracepointRequest proto.InternalMessageInfo + +func (m *RegisterTracepointRequest_TracepointRequest) GetTracepointDeployment() *logicalpb.TracepointDeployment { + if m != nil { + return m.TracepointDeployment + } + return nil +} + +func (m *RegisterTracepointRequest_TracepointRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RegisterTracepointRequest_TracepointRequest) GetTTL() *types.Duration { + if m != nil { + return m.TTL + } + return nil +} + +type RegisterTracepointResponse struct { + Tracepoints []*RegisterTracepointResponse_TracepointStatus 
`protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` } -func (m *RegisterFileSourceResponse) Reset() { *m = RegisterFileSourceResponse{} } -func (*RegisterFileSourceResponse) ProtoMessage() {} -func (*RegisterFileSourceResponse) Descriptor() ([]byte, []int) { +func (m *RegisterTracepointResponse) Reset() { *m = RegisterTracepointResponse{} } +func (*RegisterTracepointResponse) ProtoMessage() {} +func (*RegisterTracepointResponse) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{11} } -func (m *RegisterFileSourceResponse) XXX_Unmarshal(b []byte) error { +func (m *RegisterTracepointResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterFileSourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterFileSourceResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -693,51 +753,51 @@ func (m *RegisterFileSourceResponse) XXX_Marshal(b []byte, deterministic bool) ( return b[:n], nil } } -func (m *RegisterFileSourceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterFileSourceResponse.Merge(m, src) +func (m *RegisterTracepointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointResponse.Merge(m, src) } -func (m *RegisterFileSourceResponse) XXX_Size() int { +func (m *RegisterTracepointResponse) XXX_Size() int { return m.Size() } -func (m *RegisterFileSourceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterFileSourceResponse.DiscardUnknown(m) +func (m *RegisterTracepointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointResponse.DiscardUnknown(m) } 
-var xxx_messageInfo_RegisterFileSourceResponse proto.InternalMessageInfo +var xxx_messageInfo_RegisterTracepointResponse proto.InternalMessageInfo -func (m *RegisterFileSourceResponse) GetFileSources() []*RegisterFileSourceResponse_FileSourceStatus { +func (m *RegisterTracepointResponse) GetTracepoints() []*RegisterTracepointResponse_TracepointStatus { if m != nil { - return m.FileSources + return m.Tracepoints } return nil } -func (m *RegisterFileSourceResponse) GetStatus() *statuspb.Status { +func (m *RegisterTracepointResponse) GetStatus() *statuspb.Status { if m != nil { return m.Status } return nil } -type RegisterFileSourceResponse_FileSourceStatus struct { +type RegisterTracepointResponse_TracepointStatus struct { Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` ID *uuidpb.UUID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } -func (m *RegisterFileSourceResponse_FileSourceStatus) Reset() { - *m = RegisterFileSourceResponse_FileSourceStatus{} +func (m *RegisterTracepointResponse_TracepointStatus) Reset() { + *m = RegisterTracepointResponse_TracepointStatus{} } -func (*RegisterFileSourceResponse_FileSourceStatus) ProtoMessage() {} -func (*RegisterFileSourceResponse_FileSourceStatus) Descriptor() ([]byte, []int) { +func (*RegisterTracepointResponse_TracepointStatus) ProtoMessage() {} +func (*RegisterTracepointResponse_TracepointStatus) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{11, 0} } -func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Unmarshal(b []byte) error { +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { if deterministic { - return xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus.Marshal(b, m, deterministic) + return xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -747,54 +807,54 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Marshal(b []byte, dete return b[:n], nil } } -func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus.Merge(m, src) +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Merge(m, src) } -func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_Size() int { +func (m *RegisterTracepointResponse_TracepointStatus) XXX_Size() int { return m.Size() } -func (m *RegisterFileSourceResponse_FileSourceStatus) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus.DiscardUnknown(m) +func (m *RegisterTracepointResponse_TracepointStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.DiscardUnknown(m) } -var xxx_messageInfo_RegisterFileSourceResponse_FileSourceStatus proto.InternalMessageInfo +var xxx_messageInfo_RegisterTracepointResponse_TracepointStatus proto.InternalMessageInfo -func (m *RegisterFileSourceResponse_FileSourceStatus) GetStatus() *statuspb.Status { +func (m *RegisterTracepointResponse_TracepointStatus) GetStatus() *statuspb.Status { if m != nil { return m.Status } return nil } -func (m *RegisterFileSourceResponse_FileSourceStatus) GetID() *uuidpb.UUID { +func (m *RegisterTracepointResponse_TracepointStatus) GetID() *uuidpb.UUID { if m != nil { return m.ID } return nil } -func (m *RegisterFileSourceResponse_FileSourceStatus) GetName() string { +func (m *RegisterTracepointResponse_TracepointStatus) GetName() string { if m != nil { return m.Name } return "" 
} -type GetFileSourceInfoRequest struct { +type GetTracepointInfoRequest struct { IDs []*uuidpb.UUID `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` } -func (m *GetFileSourceInfoRequest) Reset() { *m = GetFileSourceInfoRequest{} } -func (*GetFileSourceInfoRequest) ProtoMessage() {} -func (*GetFileSourceInfoRequest) Descriptor() ([]byte, []int) { +func (m *GetTracepointInfoRequest) Reset() { *m = GetTracepointInfoRequest{} } +func (*GetTracepointInfoRequest) ProtoMessage() {} +func (*GetTracepointInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{12} } -func (m *GetFileSourceInfoRequest) XXX_Unmarshal(b []byte) error { +func (m *GetTracepointInfoRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetFileSourceInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetTracepointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetFileSourceInfoRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_GetTracepointInfoRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -804,40 +864,40 @@ func (m *GetFileSourceInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *GetFileSourceInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetFileSourceInfoRequest.Merge(m, src) +func (m *GetTracepointInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTracepointInfoRequest.Merge(m, src) } -func (m *GetFileSourceInfoRequest) XXX_Size() int { +func (m *GetTracepointInfoRequest) XXX_Size() int { return m.Size() } -func (m *GetFileSourceInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetFileSourceInfoRequest.DiscardUnknown(m) +func (m *GetTracepointInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTracepointInfoRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetFileSourceInfoRequest 
proto.InternalMessageInfo +var xxx_messageInfo_GetTracepointInfoRequest proto.InternalMessageInfo -func (m *GetFileSourceInfoRequest) GetIDs() []*uuidpb.UUID { +func (m *GetTracepointInfoRequest) GetIDs() []*uuidpb.UUID { if m != nil { return m.IDs } return nil } -type GetFileSourceInfoResponse struct { - FileSources []*GetFileSourceInfoResponse_FileSourceState `protobuf:"bytes,1,rep,name=file_sources,json=fileSources,proto3" json:"file_sources,omitempty"` +type GetTracepointInfoResponse struct { + Tracepoints []*GetTracepointInfoResponse_TracepointState `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` } -func (m *GetFileSourceInfoResponse) Reset() { *m = GetFileSourceInfoResponse{} } -func (*GetFileSourceInfoResponse) ProtoMessage() {} -func (*GetFileSourceInfoResponse) Descriptor() ([]byte, []int) { +func (m *GetTracepointInfoResponse) Reset() { *m = GetTracepointInfoResponse{} } +func (*GetTracepointInfoResponse) ProtoMessage() {} +func (*GetTracepointInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{13} } -func (m *GetFileSourceInfoResponse) XXX_Unmarshal(b []byte) error { +func (m *GetTracepointInfoResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetFileSourceInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetTracepointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetFileSourceInfoResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetTracepointInfoResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -847,26 +907,26 @@ func (m *GetFileSourceInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *GetFileSourceInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetFileSourceInfoResponse.Merge(m, src) +func (m *GetTracepointInfoResponse) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_GetTracepointInfoResponse.Merge(m, src) } -func (m *GetFileSourceInfoResponse) XXX_Size() int { +func (m *GetTracepointInfoResponse) XXX_Size() int { return m.Size() } -func (m *GetFileSourceInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetFileSourceInfoResponse.DiscardUnknown(m) +func (m *GetTracepointInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTracepointInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetFileSourceInfoResponse proto.InternalMessageInfo +var xxx_messageInfo_GetTracepointInfoResponse proto.InternalMessageInfo -func (m *GetFileSourceInfoResponse) GetFileSources() []*GetFileSourceInfoResponse_FileSourceState { +func (m *GetTracepointInfoResponse) GetTracepoints() []*GetTracepointInfoResponse_TracepointState { if m != nil { - return m.FileSources + return m.Tracepoints } return nil } -type GetFileSourceInfoResponse_FileSourceState struct { +type GetTracepointInfoResponse_TracepointState struct { ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` State statuspb.LifeCycleState `protobuf:"varint,2,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` Statuses []*statuspb.Status `protobuf:"bytes,3,rep,name=statuses,proto3" json:"statuses,omitempty"` @@ -875,19 +935,19 @@ type GetFileSourceInfoResponse_FileSourceState struct { SchemaNames []string `protobuf:"bytes,6,rep,name=schema_names,json=schemaNames,proto3" json:"schema_names,omitempty"` } -func (m *GetFileSourceInfoResponse_FileSourceState) Reset() { - *m = GetFileSourceInfoResponse_FileSourceState{} +func (m *GetTracepointInfoResponse_TracepointState) Reset() { + *m = GetTracepointInfoResponse_TracepointState{} } -func (*GetFileSourceInfoResponse_FileSourceState) ProtoMessage() {} -func (*GetFileSourceInfoResponse_FileSourceState) Descriptor() ([]byte, []int) { +func (*GetTracepointInfoResponse_TracepointState) ProtoMessage() {} +func (*GetTracepointInfoResponse_TracepointState) 
Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{13, 0} } -func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Unmarshal(b []byte) error { +func (m *GetTracepointInfoResponse_TracepointState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetTracepointInfoResponse_TracepointState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState.Marshal(b, m, deterministic) + return xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -897,75 +957,75 @@ func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Marshal(b []byte, determ return b[:n], nil } } -func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState.Merge(m, src) +func (m *GetTracepointInfoResponse_TracepointState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Merge(m, src) } -func (m *GetFileSourceInfoResponse_FileSourceState) XXX_Size() int { +func (m *GetTracepointInfoResponse_TracepointState) XXX_Size() int { return m.Size() } -func (m *GetFileSourceInfoResponse_FileSourceState) XXX_DiscardUnknown() { - xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState.DiscardUnknown(m) +func (m *GetTracepointInfoResponse_TracepointState) XXX_DiscardUnknown() { + xxx_messageInfo_GetTracepointInfoResponse_TracepointState.DiscardUnknown(m) } -var xxx_messageInfo_GetFileSourceInfoResponse_FileSourceState proto.InternalMessageInfo +var xxx_messageInfo_GetTracepointInfoResponse_TracepointState proto.InternalMessageInfo -func (m *GetFileSourceInfoResponse_FileSourceState) GetID() *uuidpb.UUID { +func (m *GetTracepointInfoResponse_TracepointState) 
GetID() *uuidpb.UUID { if m != nil { return m.ID } return nil } -func (m *GetFileSourceInfoResponse_FileSourceState) GetState() statuspb.LifeCycleState { +func (m *GetTracepointInfoResponse_TracepointState) GetState() statuspb.LifeCycleState { if m != nil { return m.State } return statuspb.UNKNOWN_STATE } -func (m *GetFileSourceInfoResponse_FileSourceState) GetStatuses() []*statuspb.Status { +func (m *GetTracepointInfoResponse_TracepointState) GetStatuses() []*statuspb.Status { if m != nil { return m.Statuses } return nil } -func (m *GetFileSourceInfoResponse_FileSourceState) GetName() string { +func (m *GetTracepointInfoResponse_TracepointState) GetName() string { if m != nil { return m.Name } return "" } -func (m *GetFileSourceInfoResponse_FileSourceState) GetExpectedState() statuspb.LifeCycleState { +func (m *GetTracepointInfoResponse_TracepointState) GetExpectedState() statuspb.LifeCycleState { if m != nil { return m.ExpectedState } return statuspb.UNKNOWN_STATE } -func (m *GetFileSourceInfoResponse_FileSourceState) GetSchemaNames() []string { +func (m *GetTracepointInfoResponse_TracepointState) GetSchemaNames() []string { if m != nil { return m.SchemaNames } return nil } -type RemoveFileSourceRequest struct { +type RemoveTracepointRequest struct { Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } -func (m *RemoveFileSourceRequest) Reset() { *m = RemoveFileSourceRequest{} } -func (*RemoveFileSourceRequest) ProtoMessage() {} -func (*RemoveFileSourceRequest) Descriptor() ([]byte, []int) { +func (m *RemoveTracepointRequest) Reset() { *m = RemoveTracepointRequest{} } +func (*RemoveTracepointRequest) ProtoMessage() {} +func (*RemoveTracepointRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{14} } -func (m *RemoveFileSourceRequest) XXX_Unmarshal(b []byte) error { +func (m *RemoveTracepointRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RemoveFileSourceRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RemoveTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RemoveFileSourceRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RemoveTracepointRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -975,40 +1035,40 @@ func (m *RemoveFileSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *RemoveFileSourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveFileSourceRequest.Merge(m, src) +func (m *RemoveTracepointRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveTracepointRequest.Merge(m, src) } -func (m *RemoveFileSourceRequest) XXX_Size() int { +func (m *RemoveTracepointRequest) XXX_Size() int { return m.Size() } -func (m *RemoveFileSourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveFileSourceRequest.DiscardUnknown(m) +func (m *RemoveTracepointRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveTracepointRequest.DiscardUnknown(m) } -var xxx_messageInfo_RemoveFileSourceRequest proto.InternalMessageInfo +var xxx_messageInfo_RemoveTracepointRequest proto.InternalMessageInfo -func (m *RemoveFileSourceRequest) GetNames() []string { +func (m *RemoveTracepointRequest) GetNames() []string { if m != nil { return m.Names } return nil } -type RemoveFileSourceResponse struct { +type RemoveTracepointResponse struct { Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (m *RemoveFileSourceResponse) Reset() { *m = RemoveFileSourceResponse{} } -func (*RemoveFileSourceResponse) ProtoMessage() {} -func (*RemoveFileSourceResponse) Descriptor() ([]byte, []int) { +func (m *RemoveTracepointResponse) Reset() { *m = RemoveTracepointResponse{} } +func (*RemoveTracepointResponse) ProtoMessage() {} +func (*RemoveTracepointResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor_bfe4468195647430, []int{15} } -func (m *RemoveFileSourceResponse) XXX_Unmarshal(b []byte) error { +func (m *RemoveTracepointResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RemoveFileSourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RemoveTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RemoveFileSourceResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RemoveTracepointResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1018,40 +1078,42 @@ func (m *RemoveFileSourceResponse) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *RemoveFileSourceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveFileSourceResponse.Merge(m, src) +func (m *RemoveTracepointResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveTracepointResponse.Merge(m, src) } -func (m *RemoveFileSourceResponse) XXX_Size() int { +func (m *RemoveTracepointResponse) XXX_Size() int { return m.Size() } -func (m *RemoveFileSourceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveFileSourceResponse.DiscardUnknown(m) +func (m *RemoveTracepointResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveTracepointResponse.DiscardUnknown(m) } -var xxx_messageInfo_RemoveFileSourceResponse proto.InternalMessageInfo +var xxx_messageInfo_RemoveTracepointResponse proto.InternalMessageInfo -func (m *RemoveFileSourceResponse) GetStatus() *statuspb.Status { +func (m *RemoveTracepointResponse) GetStatus() *statuspb.Status { if m != nil { return m.Status } return nil } -type RegisterTracepointRequest struct { - Requests []*RegisterTracepointRequest_TracepointRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` +type UpdateConfigRequest struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string 
`protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + AgentPodName string `protobuf:"bytes,3,opt,name=agent_pod_name,json=agentPodName,proto3" json:"agent_pod_name,omitempty"` } -func (m *RegisterTracepointRequest) Reset() { *m = RegisterTracepointRequest{} } -func (*RegisterTracepointRequest) ProtoMessage() {} -func (*RegisterTracepointRequest) Descriptor() ([]byte, []int) { +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor_bfe4468195647430, []int{16} } -func (m *RegisterTracepointRequest) XXX_Unmarshal(b []byte) error { +func (m *UpdateConfigRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *UpdateConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterTracepointRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_UpdateConfigRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1061,44 +1123,54 @@ func (m *RegisterTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *RegisterTracepointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointRequest.Merge(m, src) +func (m *UpdateConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateConfigRequest.Merge(m, src) } -func (m *RegisterTracepointRequest) XXX_Size() int { +func (m *UpdateConfigRequest) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointRequest.DiscardUnknown(m) +func (m *UpdateConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateConfigRequest.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointRequest proto.InternalMessageInfo +var 
xxx_messageInfo_UpdateConfigRequest proto.InternalMessageInfo -func (m *RegisterTracepointRequest) GetRequests() []*RegisterTracepointRequest_TracepointRequest { +func (m *UpdateConfigRequest) GetKey() string { if m != nil { - return m.Requests + return m.Key } - return nil + return "" } -type RegisterTracepointRequest_TracepointRequest struct { - TracepointDeployment *logicalpb.TracepointDeployment `protobuf:"bytes,1,opt,name=tracepoint_deployment,json=tracepointDeployment,proto3" json:"tracepoint_deployment,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - TTL *types.Duration `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` +func (m *UpdateConfigRequest) GetValue() string { + if m != nil { + return m.Value + } + return "" } -func (m *RegisterTracepointRequest_TracepointRequest) Reset() { - *m = RegisterTracepointRequest_TracepointRequest{} +func (m *UpdateConfigRequest) GetAgentPodName() string { + if m != nil { + return m.AgentPodName + } + return "" } -func (*RegisterTracepointRequest_TracepointRequest) ProtoMessage() {} -func (*RegisterTracepointRequest_TracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{16, 0} + +type UpdateConfigResponse struct { + Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Unmarshal(b []byte) error { + +func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } +func (*UpdateConfigResponse) ProtoMessage() {} +func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{17} +} +func (m *UpdateConfigResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *UpdateConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if 
deterministic { - return xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_UpdateConfigResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1108,55 +1180,75 @@ func (m *RegisterTracepointRequest_TracepointRequest) XXX_Marshal(b []byte, dete return b[:n], nil } } -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.Merge(m, src) +func (m *UpdateConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateConfigResponse.Merge(m, src) } -func (m *RegisterTracepointRequest_TracepointRequest) XXX_Size() int { +func (m *UpdateConfigResponse) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointRequest_TracepointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointRequest_TracepointRequest.DiscardUnknown(m) +func (m *UpdateConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateConfigResponse.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointRequest_TracepointRequest proto.InternalMessageInfo +var xxx_messageInfo_UpdateConfigResponse proto.InternalMessageInfo -func (m *RegisterTracepointRequest_TracepointRequest) GetTracepointDeployment() *logicalpb.TracepointDeployment { +func (m *UpdateConfigResponse) GetStatus() *statuspb.Status { if m != nil { - return m.TracepointDeployment + return m.Status } return nil } -func (m *RegisterTracepointRequest_TracepointRequest) GetName() string { - if m != nil { - return m.Name - } - return "" +type GetScriptsRequest struct { } -func (m *RegisterTracepointRequest_TracepointRequest) GetTTL() *types.Duration { - if m != nil { - return m.TTL +func (m *GetScriptsRequest) Reset() { *m = GetScriptsRequest{} } +func (*GetScriptsRequest) ProtoMessage() {} +func (*GetScriptsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{18} +} +func (m 
*GetScriptsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetScriptsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return nil +} +func (m *GetScriptsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetScriptsRequest.Merge(m, src) +} +func (m *GetScriptsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetScriptsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetScriptsRequest.DiscardUnknown(m) } -type RegisterTracepointResponse struct { - Tracepoints []*RegisterTracepointResponse_TracepointStatus `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` - Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` +var xxx_messageInfo_GetScriptsRequest proto.InternalMessageInfo + +type GetScriptsResponse struct { + Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *RegisterTracepointResponse) Reset() { *m = RegisterTracepointResponse{} } -func (*RegisterTracepointResponse) ProtoMessage() {} -func (*RegisterTracepointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{17} +func (m *GetScriptsResponse) Reset() { *m = GetScriptsResponse{} } +func (*GetScriptsResponse) ProtoMessage() {} +func (*GetScriptsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{19} } -func (m *RegisterTracepointResponse) XXX_Unmarshal(b []byte) error { +func (m *GetScriptsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { +func (m *GetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterTracepointResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetScriptsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1166,51 +1258,40 @@ func (m *RegisterTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ( return b[:n], nil } } -func (m *RegisterTracepointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointResponse.Merge(m, src) +func (m *GetScriptsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetScriptsResponse.Merge(m, src) } -func (m *RegisterTracepointResponse) XXX_Size() int { +func (m *GetScriptsResponse) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointResponse.DiscardUnknown(m) +func (m *GetScriptsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetScriptsResponse.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointResponse proto.InternalMessageInfo - -func (m *RegisterTracepointResponse) GetTracepoints() []*RegisterTracepointResponse_TracepointStatus { - if m != nil { - return m.Tracepoints - } - return nil -} +var xxx_messageInfo_GetScriptsResponse proto.InternalMessageInfo -func (m *RegisterTracepointResponse) GetStatus() *statuspb.Status { +func (m *GetScriptsResponse) GetScripts() map[string]*cvmsgspb.CronScript { if m != nil { - return m.Status + return m.Scripts } return nil } -type RegisterTracepointResponse_TracepointStatus struct { - Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - ID *uuidpb.UUID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` +type AddOrUpdateScriptRequest struct { + Script *cvmsgspb.CronScript `protobuf:"bytes,1,opt,name=script,proto3" 
json:"script,omitempty"` } -func (m *RegisterTracepointResponse_TracepointStatus) Reset() { - *m = RegisterTracepointResponse_TracepointStatus{} -} -func (*RegisterTracepointResponse_TracepointStatus) ProtoMessage() {} -func (*RegisterTracepointResponse_TracepointStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{17, 0} +func (m *AddOrUpdateScriptRequest) Reset() { *m = AddOrUpdateScriptRequest{} } +func (*AddOrUpdateScriptRequest) ProtoMessage() {} +func (*AddOrUpdateScriptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{20} } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Unmarshal(b []byte) error { +func (m *AddOrUpdateScriptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *AddOrUpdateScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Marshal(b, m, deterministic) + return xxx_messageInfo_AddOrUpdateScriptRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1220,54 +1301,39 @@ func (m *RegisterTracepointResponse_TracepointStatus) XXX_Marshal(b []byte, dete return b[:n], nil } } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.Merge(m, src) +func (m *AddOrUpdateScriptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddOrUpdateScriptRequest.Merge(m, src) } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_Size() int { +func (m *AddOrUpdateScriptRequest) XXX_Size() int { return m.Size() } -func (m *RegisterTracepointResponse_TracepointStatus) XXX_DiscardUnknown() { - xxx_messageInfo_RegisterTracepointResponse_TracepointStatus.DiscardUnknown(m) +func (m 
*AddOrUpdateScriptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddOrUpdateScriptRequest.DiscardUnknown(m) } -var xxx_messageInfo_RegisterTracepointResponse_TracepointStatus proto.InternalMessageInfo - -func (m *RegisterTracepointResponse_TracepointStatus) GetStatus() *statuspb.Status { - if m != nil { - return m.Status - } - return nil -} +var xxx_messageInfo_AddOrUpdateScriptRequest proto.InternalMessageInfo -func (m *RegisterTracepointResponse_TracepointStatus) GetID() *uuidpb.UUID { +func (m *AddOrUpdateScriptRequest) GetScript() *cvmsgspb.CronScript { if m != nil { - return m.ID + return m.Script } return nil } -func (m *RegisterTracepointResponse_TracepointStatus) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type GetTracepointInfoRequest struct { - IDs []*uuidpb.UUID `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +type AddOrUpdateScriptResponse struct { } -func (m *GetTracepointInfoRequest) Reset() { *m = GetTracepointInfoRequest{} } -func (*GetTracepointInfoRequest) ProtoMessage() {} -func (*GetTracepointInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{18} +func (m *AddOrUpdateScriptResponse) Reset() { *m = AddOrUpdateScriptResponse{} } +func (*AddOrUpdateScriptResponse) ProtoMessage() {} +func (*AddOrUpdateScriptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{21} } -func (m *GetTracepointInfoRequest) XXX_Unmarshal(b []byte) error { +func (m *AddOrUpdateScriptResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetTracepointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *AddOrUpdateScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetTracepointInfoRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_AddOrUpdateScriptResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := 
m.MarshalToSizedBuffer(b) @@ -1277,40 +1343,33 @@ func (m *GetTracepointInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *GetTracepointInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTracepointInfoRequest.Merge(m, src) +func (m *AddOrUpdateScriptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddOrUpdateScriptResponse.Merge(m, src) } -func (m *GetTracepointInfoRequest) XXX_Size() int { +func (m *AddOrUpdateScriptResponse) XXX_Size() int { return m.Size() } -func (m *GetTracepointInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetTracepointInfoRequest.DiscardUnknown(m) +func (m *AddOrUpdateScriptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddOrUpdateScriptResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetTracepointInfoRequest proto.InternalMessageInfo - -func (m *GetTracepointInfoRequest) GetIDs() []*uuidpb.UUID { - if m != nil { - return m.IDs - } - return nil -} +var xxx_messageInfo_AddOrUpdateScriptResponse proto.InternalMessageInfo -type GetTracepointInfoResponse struct { - Tracepoints []*GetTracepointInfoResponse_TracepointState `protobuf:"bytes,1,rep,name=tracepoints,proto3" json:"tracepoints,omitempty"` +type DeleteScriptRequest struct { + ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` } -func (m *GetTracepointInfoResponse) Reset() { *m = GetTracepointInfoResponse{} } -func (*GetTracepointInfoResponse) ProtoMessage() {} -func (*GetTracepointInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{19} +func (m *DeleteScriptRequest) Reset() { *m = DeleteScriptRequest{} } +func (*DeleteScriptRequest) ProtoMessage() {} +func (*DeleteScriptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{22} } -func (m *GetTracepointInfoResponse) XXX_Unmarshal(b []byte) error { +func (m *DeleteScriptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
} -func (m *GetTracepointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeleteScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetTracepointInfoResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_DeleteScriptRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1320,47 +1379,39 @@ func (m *GetTracepointInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *GetTracepointInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTracepointInfoResponse.Merge(m, src) +func (m *DeleteScriptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteScriptRequest.Merge(m, src) } -func (m *GetTracepointInfoResponse) XXX_Size() int { +func (m *DeleteScriptRequest) XXX_Size() int { return m.Size() } -func (m *GetTracepointInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetTracepointInfoResponse.DiscardUnknown(m) +func (m *DeleteScriptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteScriptRequest.DiscardUnknown(m) } -var xxx_messageInfo_GetTracepointInfoResponse proto.InternalMessageInfo +var xxx_messageInfo_DeleteScriptRequest proto.InternalMessageInfo -func (m *GetTracepointInfoResponse) GetTracepoints() []*GetTracepointInfoResponse_TracepointState { +func (m *DeleteScriptRequest) GetScriptID() *uuidpb.UUID { if m != nil { - return m.Tracepoints + return m.ScriptID } return nil } -type GetTracepointInfoResponse_TracepointState struct { - ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - State statuspb.LifeCycleState `protobuf:"varint,2,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` - Statuses []*statuspb.Status `protobuf:"bytes,3,rep,name=statuses,proto3" json:"statuses,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - ExpectedState 
statuspb.LifeCycleState `protobuf:"varint,5,opt,name=expected_state,json=expectedState,proto3,enum=px.statuspb.LifeCycleState" json:"expected_state,omitempty"` - SchemaNames []string `protobuf:"bytes,6,rep,name=schema_names,json=schemaNames,proto3" json:"schema_names,omitempty"` +type DeleteScriptResponse struct { } -func (m *GetTracepointInfoResponse_TracepointState) Reset() { - *m = GetTracepointInfoResponse_TracepointState{} -} -func (*GetTracepointInfoResponse_TracepointState) ProtoMessage() {} -func (*GetTracepointInfoResponse_TracepointState) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{19, 0} +func (m *DeleteScriptResponse) Reset() { *m = DeleteScriptResponse{} } +func (*DeleteScriptResponse) ProtoMessage() {} +func (*DeleteScriptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{23} } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Unmarshal(b []byte) error { +func (m *DeleteScriptResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *DeleteScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Marshal(b, m, deterministic) + return xxx_messageInfo_DeleteScriptResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1370,75 +1421,33 @@ func (m *GetTracepointInfoResponse_TracepointState) XXX_Marshal(b []byte, determ return b[:n], nil } } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTracepointInfoResponse_TracepointState.Merge(m, src) +func (m *DeleteScriptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteScriptResponse.Merge(m, src) } -func (m *GetTracepointInfoResponse_TracepointState) XXX_Size() int { +func (m 
*DeleteScriptResponse) XXX_Size() int { return m.Size() } -func (m *GetTracepointInfoResponse_TracepointState) XXX_DiscardUnknown() { - xxx_messageInfo_GetTracepointInfoResponse_TracepointState.DiscardUnknown(m) +func (m *DeleteScriptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteScriptResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetTracepointInfoResponse_TracepointState proto.InternalMessageInfo +var xxx_messageInfo_DeleteScriptResponse proto.InternalMessageInfo -func (m *GetTracepointInfoResponse_TracepointState) GetID() *uuidpb.UUID { - if m != nil { - return m.ID - } - return nil -} - -func (m *GetTracepointInfoResponse_TracepointState) GetState() statuspb.LifeCycleState { - if m != nil { - return m.State - } - return statuspb.UNKNOWN_STATE -} - -func (m *GetTracepointInfoResponse_TracepointState) GetStatuses() []*statuspb.Status { - if m != nil { - return m.Statuses - } - return nil -} - -func (m *GetTracepointInfoResponse_TracepointState) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GetTracepointInfoResponse_TracepointState) GetExpectedState() statuspb.LifeCycleState { - if m != nil { - return m.ExpectedState - } - return statuspb.UNKNOWN_STATE -} - -func (m *GetTracepointInfoResponse_TracepointState) GetSchemaNames() []string { - if m != nil { - return m.SchemaNames - } - return nil -} - -type RemoveTracepointRequest struct { - Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` +type SetScriptsRequest struct { + Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *RemoveTracepointRequest) Reset() { *m = RemoveTracepointRequest{} } -func (*RemoveTracepointRequest) ProtoMessage() {} -func (*RemoveTracepointRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{20} +func (m *SetScriptsRequest) 
Reset() { *m = SetScriptsRequest{} } +func (*SetScriptsRequest) ProtoMessage() {} +func (*SetScriptsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{24} } -func (m *RemoveTracepointRequest) XXX_Unmarshal(b []byte) error { +func (m *SetScriptsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RemoveTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *SetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RemoveTracepointRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_SetScriptsRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1448,40 +1457,39 @@ func (m *RemoveTracepointRequest) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *RemoveTracepointRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveTracepointRequest.Merge(m, src) +func (m *SetScriptsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetScriptsRequest.Merge(m, src) } -func (m *RemoveTracepointRequest) XXX_Size() int { +func (m *SetScriptsRequest) XXX_Size() int { return m.Size() } -func (m *RemoveTracepointRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveTracepointRequest.DiscardUnknown(m) +func (m *SetScriptsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetScriptsRequest.DiscardUnknown(m) } -var xxx_messageInfo_RemoveTracepointRequest proto.InternalMessageInfo +var xxx_messageInfo_SetScriptsRequest proto.InternalMessageInfo -func (m *RemoveTracepointRequest) GetNames() []string { +func (m *SetScriptsRequest) GetScripts() map[string]*cvmsgspb.CronScript { if m != nil { - return m.Names + return m.Scripts } return nil } -type RemoveTracepointResponse struct { - Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +type SetScriptsResponse struct { } -func (m 
*RemoveTracepointResponse) Reset() { *m = RemoveTracepointResponse{} } -func (*RemoveTracepointResponse) ProtoMessage() {} -func (*RemoveTracepointResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{21} +func (m *SetScriptsResponse) Reset() { *m = SetScriptsResponse{} } +func (*SetScriptsResponse) ProtoMessage() {} +func (*SetScriptsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{25} } -func (m *RemoveTracepointResponse) XXX_Unmarshal(b []byte) error { +func (m *SetScriptsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RemoveTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *SetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_RemoveTracepointResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_SetScriptsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1491,42 +1499,36 @@ func (m *RemoveTracepointResponse) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *RemoveTracepointResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveTracepointResponse.Merge(m, src) +func (m *SetScriptsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetScriptsResponse.Merge(m, src) } -func (m *RemoveTracepointResponse) XXX_Size() int { +func (m *SetScriptsResponse) XXX_Size() int { return m.Size() } -func (m *RemoveTracepointResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveTracepointResponse.DiscardUnknown(m) +func (m *SetScriptsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetScriptsResponse.DiscardUnknown(m) } -var xxx_messageInfo_RemoveTracepointResponse proto.InternalMessageInfo - -func (m *RemoveTracepointResponse) GetStatus() *statuspb.Status { - if m != nil { - return m.Status - } - return nil -} +var xxx_messageInfo_SetScriptsResponse 
proto.InternalMessageInfo -type UpdateConfigRequest struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - AgentPodName string `protobuf:"bytes,3,opt,name=agent_pod_name,json=agentPodName,proto3" json:"agent_pod_name,omitempty"` +type ExecutionStats struct { + ExecutionTimeNs int64 `protobuf:"varint,1,opt,name=execution_time_ns,json=executionTimeNs,proto3" json:"execution_time_ns,omitempty"` + CompilationTimeNs int64 `protobuf:"varint,2,opt,name=compilation_time_ns,json=compilationTimeNs,proto3" json:"compilation_time_ns,omitempty"` + BytesProcessed int64 `protobuf:"varint,3,opt,name=bytes_processed,json=bytesProcessed,proto3" json:"bytes_processed,omitempty"` + RecordsProcessed int64 `protobuf:"varint,4,opt,name=records_processed,json=recordsProcessed,proto3" json:"records_processed,omitempty"` } -func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } -func (*UpdateConfigRequest) ProtoMessage() {} -func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{22} +func (m *ExecutionStats) Reset() { *m = ExecutionStats{} } +func (*ExecutionStats) ProtoMessage() {} +func (*ExecutionStats) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{26} } -func (m *UpdateConfigRequest) XXX_Unmarshal(b []byte) error { +func (m *ExecutionStats) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *UpdateConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ExecutionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_UpdateConfigRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_ExecutionStats.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1536,54 +1538,66 @@ func (m *UpdateConfigRequest) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, return b[:n], nil } } -func (m *UpdateConfigRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateConfigRequest.Merge(m, src) +func (m *ExecutionStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecutionStats.Merge(m, src) } -func (m *UpdateConfigRequest) XXX_Size() int { +func (m *ExecutionStats) XXX_Size() int { return m.Size() } -func (m *UpdateConfigRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateConfigRequest.DiscardUnknown(m) +func (m *ExecutionStats) XXX_DiscardUnknown() { + xxx_messageInfo_ExecutionStats.DiscardUnknown(m) } -var xxx_messageInfo_UpdateConfigRequest proto.InternalMessageInfo +var xxx_messageInfo_ExecutionStats proto.InternalMessageInfo -func (m *UpdateConfigRequest) GetKey() string { +func (m *ExecutionStats) GetExecutionTimeNs() int64 { if m != nil { - return m.Key + return m.ExecutionTimeNs } - return "" + return 0 } -func (m *UpdateConfigRequest) GetValue() string { +func (m *ExecutionStats) GetCompilationTimeNs() int64 { if m != nil { - return m.Value + return m.CompilationTimeNs } - return "" + return 0 } -func (m *UpdateConfigRequest) GetAgentPodName() string { +func (m *ExecutionStats) GetBytesProcessed() int64 { if m != nil { - return m.AgentPodName + return m.BytesProcessed } - return "" + return 0 } -type UpdateConfigResponse struct { - Status *statuspb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +func (m *ExecutionStats) GetRecordsProcessed() int64 { + if m != nil { + return m.RecordsProcessed + } + return 0 } -func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } -func (*UpdateConfigResponse) ProtoMessage() {} -func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{23} +type RecordExecutionResultRequest struct { + ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` + Timestamp *types.Timestamp 
`protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Types that are valid to be assigned to Result: + // *RecordExecutionResultRequest_Error + // *RecordExecutionResultRequest_ExecutionStats + Result isRecordExecutionResultRequest_Result `protobuf_oneof:"result"` } -func (m *UpdateConfigResponse) XXX_Unmarshal(b []byte) error { + +func (m *RecordExecutionResultRequest) Reset() { *m = RecordExecutionResultRequest{} } +func (*RecordExecutionResultRequest) ProtoMessage() {} +func (*RecordExecutionResultRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{27} +} +func (m *RecordExecutionResultRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *UpdateConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RecordExecutionResultRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_UpdateConfigResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_RecordExecutionResultRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1593,118 +1607,92 @@ func (m *UpdateConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *UpdateConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateConfigResponse.Merge(m, src) +func (m *RecordExecutionResultRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecordExecutionResultRequest.Merge(m, src) } -func (m *UpdateConfigResponse) XXX_Size() int { +func (m *RecordExecutionResultRequest) XXX_Size() int { return m.Size() } -func (m *UpdateConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateConfigResponse.DiscardUnknown(m) +func (m *RecordExecutionResultRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RecordExecutionResultRequest.DiscardUnknown(m) } -var xxx_messageInfo_UpdateConfigResponse proto.InternalMessageInfo +var 
xxx_messageInfo_RecordExecutionResultRequest proto.InternalMessageInfo -func (m *UpdateConfigResponse) GetStatus() *statuspb.Status { +type isRecordExecutionResultRequest_Result interface { + isRecordExecutionResultRequest_Result() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type RecordExecutionResultRequest_Error struct { + Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` +} +type RecordExecutionResultRequest_ExecutionStats struct { + ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` +} + +func (*RecordExecutionResultRequest_Error) isRecordExecutionResultRequest_Result() {} +func (*RecordExecutionResultRequest_ExecutionStats) isRecordExecutionResultRequest_Result() {} + +func (m *RecordExecutionResultRequest) GetResult() isRecordExecutionResultRequest_Result { if m != nil { - return m.Status + return m.Result } return nil } -type GetScriptsRequest struct { +func (m *RecordExecutionResultRequest) GetScriptID() *uuidpb.UUID { + if m != nil { + return m.ScriptID + } + return nil } -func (m *GetScriptsRequest) Reset() { *m = GetScriptsRequest{} } -func (*GetScriptsRequest) ProtoMessage() {} -func (*GetScriptsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{24} -} -func (m *GetScriptsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetScriptsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *RecordExecutionResultRequest) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp } + return nil } -func (m *GetScriptsRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetScriptsRequest.Merge(m, src) -} -func (m *GetScriptsRequest) XXX_Size() int { - return m.Size() -} -func (m *GetScriptsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetScriptsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetScriptsRequest proto.InternalMessageInfo -type GetScriptsResponse struct { - Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +func (m *RecordExecutionResultRequest) GetError() *statuspb.Status { + if x, ok := m.GetResult().(*RecordExecutionResultRequest_Error); ok { + return x.Error + } + return nil } -func (m *GetScriptsResponse) Reset() { *m = GetScriptsResponse{} } -func (*GetScriptsResponse) ProtoMessage() {} -func (*GetScriptsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{25} -} -func (m *GetScriptsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetScriptsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *RecordExecutionResultRequest) GetExecutionStats() *ExecutionStats { + if x, ok := m.GetResult().(*RecordExecutionResultRequest_ExecutionStats); ok { + return x.ExecutionStats } + return nil } -func (m *GetScriptsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetScriptsResponse.Merge(m, src) -} -func (m *GetScriptsResponse) XXX_Size() int { - return m.Size() -} -func (m *GetScriptsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetScriptsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetScriptsResponse proto.InternalMessageInfo -func (m *GetScriptsResponse) GetScripts() map[string]*cvmsgspb.CronScript { - if m != nil { - return 
m.Scripts +// XXX_OneofWrappers is for the internal use of the proto package. +func (*RecordExecutionResultRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*RecordExecutionResultRequest_Error)(nil), + (*RecordExecutionResultRequest_ExecutionStats)(nil), } - return nil } -type AddOrUpdateScriptRequest struct { - Script *cvmsgspb.CronScript `protobuf:"bytes,1,opt,name=script,proto3" json:"script,omitempty"` +type RecordExecutionResultResponse struct { } -func (m *AddOrUpdateScriptRequest) Reset() { *m = AddOrUpdateScriptRequest{} } -func (*AddOrUpdateScriptRequest) ProtoMessage() {} -func (*AddOrUpdateScriptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{26} +func (m *RecordExecutionResultResponse) Reset() { *m = RecordExecutionResultResponse{} } +func (*RecordExecutionResultResponse) ProtoMessage() {} +func (*RecordExecutionResultResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{28} } -func (m *AddOrUpdateScriptRequest) XXX_Unmarshal(b []byte) error { +func (m *RecordExecutionResultResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AddOrUpdateScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RecordExecutionResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_AddOrUpdateScriptRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_RecordExecutionResultResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1714,39 +1702,32 @@ func (m *AddOrUpdateScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *AddOrUpdateScriptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddOrUpdateScriptRequest.Merge(m, src) +func (m *RecordExecutionResultResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecordExecutionResultResponse.Merge(m, src) } -func (m 
*AddOrUpdateScriptRequest) XXX_Size() int { +func (m *RecordExecutionResultResponse) XXX_Size() int { return m.Size() } -func (m *AddOrUpdateScriptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddOrUpdateScriptRequest.DiscardUnknown(m) +func (m *RecordExecutionResultResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RecordExecutionResultResponse.DiscardUnknown(m) } -var xxx_messageInfo_AddOrUpdateScriptRequest proto.InternalMessageInfo - -func (m *AddOrUpdateScriptRequest) GetScript() *cvmsgspb.CronScript { - if m != nil { - return m.Script - } - return nil -} +var xxx_messageInfo_RecordExecutionResultResponse proto.InternalMessageInfo -type AddOrUpdateScriptResponse struct { +type GetAllExecutionResultsRequest struct { } -func (m *AddOrUpdateScriptResponse) Reset() { *m = AddOrUpdateScriptResponse{} } -func (*AddOrUpdateScriptResponse) ProtoMessage() {} -func (*AddOrUpdateScriptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{27} +func (m *GetAllExecutionResultsRequest) Reset() { *m = GetAllExecutionResultsRequest{} } +func (*GetAllExecutionResultsRequest) ProtoMessage() {} +func (*GetAllExecutionResultsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{29} } -func (m *AddOrUpdateScriptResponse) XXX_Unmarshal(b []byte) error { +func (m *GetAllExecutionResultsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *AddOrUpdateScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetAllExecutionResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_AddOrUpdateScriptResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetAllExecutionResultsRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1756,33 +1737,33 @@ func (m *AddOrUpdateScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func 
(m *AddOrUpdateScriptResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddOrUpdateScriptResponse.Merge(m, src) +func (m *GetAllExecutionResultsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllExecutionResultsRequest.Merge(m, src) } -func (m *AddOrUpdateScriptResponse) XXX_Size() int { +func (m *GetAllExecutionResultsRequest) XXX_Size() int { return m.Size() } -func (m *AddOrUpdateScriptResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AddOrUpdateScriptResponse.DiscardUnknown(m) +func (m *GetAllExecutionResultsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllExecutionResultsRequest.DiscardUnknown(m) } -var xxx_messageInfo_AddOrUpdateScriptResponse proto.InternalMessageInfo +var xxx_messageInfo_GetAllExecutionResultsRequest proto.InternalMessageInfo -type DeleteScriptRequest struct { - ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` +type GetAllExecutionResultsResponse struct { + Results []*GetAllExecutionResultsResponse_ExecutionResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } -func (m *DeleteScriptRequest) Reset() { *m = DeleteScriptRequest{} } -func (*DeleteScriptRequest) ProtoMessage() {} -func (*DeleteScriptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{28} +func (m *GetAllExecutionResultsResponse) Reset() { *m = GetAllExecutionResultsResponse{} } +func (*GetAllExecutionResultsResponse) ProtoMessage() {} +func (*GetAllExecutionResultsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{30} } -func (m *DeleteScriptRequest) XXX_Unmarshal(b []byte) error { +func (m *GetAllExecutionResultsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DeleteScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetAllExecutionResultsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - 
return xxx_messageInfo_DeleteScriptRequest.Marshal(b, m, deterministic) + return xxx_messageInfo_GetAllExecutionResultsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1792,39 +1773,47 @@ func (m *DeleteScriptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *DeleteScriptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteScriptRequest.Merge(m, src) +func (m *GetAllExecutionResultsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllExecutionResultsResponse.Merge(m, src) } -func (m *DeleteScriptRequest) XXX_Size() int { +func (m *GetAllExecutionResultsResponse) XXX_Size() int { return m.Size() } -func (m *DeleteScriptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteScriptRequest.DiscardUnknown(m) +func (m *GetAllExecutionResultsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllExecutionResultsResponse.DiscardUnknown(m) } -var xxx_messageInfo_DeleteScriptRequest proto.InternalMessageInfo +var xxx_messageInfo_GetAllExecutionResultsResponse proto.InternalMessageInfo -func (m *DeleteScriptRequest) GetScriptID() *uuidpb.UUID { +func (m *GetAllExecutionResultsResponse) GetResults() []*GetAllExecutionResultsResponse_ExecutionResult { if m != nil { - return m.ScriptID + return m.Results } return nil } -type DeleteScriptResponse struct { +type GetAllExecutionResultsResponse_ExecutionResult struct { + ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Types that are valid to be assigned to Result: + // *GetAllExecutionResultsResponse_ExecutionResult_Error + // *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats + Result isGetAllExecutionResultsResponse_ExecutionResult_Result `protobuf_oneof:"result"` } -func (m *DeleteScriptResponse) Reset() { *m = DeleteScriptResponse{} 
} -func (*DeleteScriptResponse) ProtoMessage() {} -func (*DeleteScriptResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{29} +func (m *GetAllExecutionResultsResponse_ExecutionResult) Reset() { + *m = GetAllExecutionResultsResponse_ExecutionResult{} } -func (m *DeleteScriptResponse) XXX_Unmarshal(b []byte) error { +func (*GetAllExecutionResultsResponse_ExecutionResult) ProtoMessage() {} +func (*GetAllExecutionResultsResponse_ExecutionResult) Descriptor() ([]byte, []int) { + return fileDescriptor_bfe4468195647430, []int{30, 0} +} +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DeleteScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_DeleteScriptResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1834,815 +1823,394 @@ func (m *DeleteScriptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte return b[:n], nil } } -func (m *DeleteScriptResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteScriptResponse.Merge(m, src) +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Merge(m, src) } -func (m *DeleteScriptResponse) XXX_Size() int { +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Size() int { return m.Size() } -func (m *DeleteScriptResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteScriptResponse.DiscardUnknown(m) +func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.DiscardUnknown(m) } -var 
xxx_messageInfo_DeleteScriptResponse proto.InternalMessageInfo +var xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult proto.InternalMessageInfo -type SetScriptsRequest struct { - Scripts map[string]*cvmsgspb.CronScript `protobuf:"bytes,1,rep,name=scripts,proto3" json:"scripts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +type isGetAllExecutionResultsResponse_ExecutionResult_Result interface { + isGetAllExecutionResultsResponse_ExecutionResult_Result() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int } -func (m *SetScriptsRequest) Reset() { *m = SetScriptsRequest{} } -func (*SetScriptsRequest) ProtoMessage() {} -func (*SetScriptsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{30} -} -func (m *SetScriptsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SetScriptsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SetScriptsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } +type GetAllExecutionResultsResponse_ExecutionResult_Error struct { + Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` } -func (m *SetScriptsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetScriptsRequest.Merge(m, src) +type GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats struct { + ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` } -func (m *SetScriptsRequest) XXX_Size() int { - return m.Size() + +func (*GetAllExecutionResultsResponse_ExecutionResult_Error) isGetAllExecutionResultsResponse_ExecutionResult_Result() { } -func (m *SetScriptsRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_SetScriptsRequest.DiscardUnknown(m) +func (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) isGetAllExecutionResultsResponse_ExecutionResult_Result() { } -var xxx_messageInfo_SetScriptsRequest proto.InternalMessageInfo +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetResult() isGetAllExecutionResultsResponse_ExecutionResult_Result { + if m != nil { + return m.Result + } + return nil +} -func (m *SetScriptsRequest) GetScripts() map[string]*cvmsgspb.CronScript { +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetScriptID() *uuidpb.UUID { if m != nil { - return m.Scripts + return m.ScriptID } return nil } -type SetScriptsResponse struct { +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp + } + return nil } -func (m *SetScriptsResponse) Reset() { *m = SetScriptsResponse{} } -func (*SetScriptsResponse) ProtoMessage() {} -func (*SetScriptsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{31} +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetError() *statuspb.Status { + if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_Error); ok { + return x.Error + } + return nil } -func (m *SetScriptsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SetScriptsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SetScriptsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SetScriptsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetScriptsResponse.Merge(m, src) -} -func (m *SetScriptsResponse) XXX_Size() int { - return m.Size() -} -func (m *SetScriptsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetScriptsResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_SetScriptsResponse proto.InternalMessageInfo -type ExecutionStats struct { - ExecutionTimeNs int64 `protobuf:"varint,1,opt,name=execution_time_ns,json=executionTimeNs,proto3" json:"execution_time_ns,omitempty"` - CompilationTimeNs int64 `protobuf:"varint,2,opt,name=compilation_time_ns,json=compilationTimeNs,proto3" json:"compilation_time_ns,omitempty"` - BytesProcessed int64 `protobuf:"varint,3,opt,name=bytes_processed,json=bytesProcessed,proto3" json:"bytes_processed,omitempty"` - RecordsProcessed int64 `protobuf:"varint,4,opt,name=records_processed,json=recordsProcessed,proto3" json:"records_processed,omitempty"` -} - -func (m *ExecutionStats) Reset() { *m = ExecutionStats{} } -func (*ExecutionStats) ProtoMessage() {} -func (*ExecutionStats) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{32} -} -func (m *ExecutionStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecutionStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecutionStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *GetAllExecutionResultsResponse_ExecutionResult) GetExecutionStats() *ExecutionStats { + if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats); ok { + return x.ExecutionStats } + return nil } -func (m *ExecutionStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecutionStats.Merge(m, src) -} -func (m *ExecutionStats) XXX_Size() int { - return m.Size() -} -func (m *ExecutionStats) XXX_DiscardUnknown() { - xxx_messageInfo_ExecutionStats.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecutionStats proto.InternalMessageInfo -func (m *ExecutionStats) GetExecutionTimeNs() int64 { - if m != nil { - return m.ExecutionTimeNs +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*GetAllExecutionResultsResponse_ExecutionResult) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*GetAllExecutionResultsResponse_ExecutionResult_Error)(nil), + (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats)(nil), } - return 0 } -func (m *ExecutionStats) GetCompilationTimeNs() int64 { - if m != nil { - return m.CompilationTimeNs - } - return 0 +func init() { + proto.RegisterType((*SchemaRequest)(nil), "px.vizier.services.metadata.SchemaRequest") + proto.RegisterType((*SchemaResponse)(nil), "px.vizier.services.metadata.SchemaResponse") + proto.RegisterType((*AgentInfoRequest)(nil), "px.vizier.services.metadata.AgentInfoRequest") + proto.RegisterType((*AgentInfoResponse)(nil), "px.vizier.services.metadata.AgentInfoResponse") + proto.RegisterType((*AgentMetadata)(nil), "px.vizier.services.metadata.AgentMetadata") + proto.RegisterType((*AgentUpdatesRequest)(nil), "px.vizier.services.metadata.AgentUpdatesRequest") + proto.RegisterType((*AgentUpdate)(nil), "px.vizier.services.metadata.AgentUpdate") + proto.RegisterType((*AgentUpdatesResponse)(nil), "px.vizier.services.metadata.AgentUpdatesResponse") + proto.RegisterType((*WithPrefixKeyRequest)(nil), "px.vizier.services.metadata.WithPrefixKeyRequest") + proto.RegisterType((*WithPrefixKeyResponse)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse") + proto.RegisterType((*WithPrefixKeyResponse_KV)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse.KV") + proto.RegisterType((*RegisterTracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest") + proto.RegisterType((*RegisterTracepointRequest_TracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest.TracepointRequest") + proto.RegisterType((*RegisterTracepointResponse)(nil), "px.vizier.services.metadata.RegisterTracepointResponse") + proto.RegisterType((*RegisterTracepointResponse_TracepointStatus)(nil), "px.vizier.services.metadata.RegisterTracepointResponse.TracepointStatus") + 
proto.RegisterType((*GetTracepointInfoRequest)(nil), "px.vizier.services.metadata.GetTracepointInfoRequest") + proto.RegisterType((*GetTracepointInfoResponse)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse") + proto.RegisterType((*GetTracepointInfoResponse_TracepointState)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse.TracepointState") + proto.RegisterType((*RemoveTracepointRequest)(nil), "px.vizier.services.metadata.RemoveTracepointRequest") + proto.RegisterType((*RemoveTracepointResponse)(nil), "px.vizier.services.metadata.RemoveTracepointResponse") + proto.RegisterType((*UpdateConfigRequest)(nil), "px.vizier.services.metadata.UpdateConfigRequest") + proto.RegisterType((*UpdateConfigResponse)(nil), "px.vizier.services.metadata.UpdateConfigResponse") + proto.RegisterType((*GetScriptsRequest)(nil), "px.vizier.services.metadata.GetScriptsRequest") + proto.RegisterType((*GetScriptsResponse)(nil), "px.vizier.services.metadata.GetScriptsResponse") + proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), "px.vizier.services.metadata.GetScriptsResponse.ScriptsEntry") + proto.RegisterType((*AddOrUpdateScriptRequest)(nil), "px.vizier.services.metadata.AddOrUpdateScriptRequest") + proto.RegisterType((*AddOrUpdateScriptResponse)(nil), "px.vizier.services.metadata.AddOrUpdateScriptResponse") + proto.RegisterType((*DeleteScriptRequest)(nil), "px.vizier.services.metadata.DeleteScriptRequest") + proto.RegisterType((*DeleteScriptResponse)(nil), "px.vizier.services.metadata.DeleteScriptResponse") + proto.RegisterType((*SetScriptsRequest)(nil), "px.vizier.services.metadata.SetScriptsRequest") + proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), "px.vizier.services.metadata.SetScriptsRequest.ScriptsEntry") + proto.RegisterType((*SetScriptsResponse)(nil), "px.vizier.services.metadata.SetScriptsResponse") + proto.RegisterType((*ExecutionStats)(nil), "px.vizier.services.metadata.ExecutionStats") + 
proto.RegisterType((*RecordExecutionResultRequest)(nil), "px.vizier.services.metadata.RecordExecutionResultRequest") + proto.RegisterType((*RecordExecutionResultResponse)(nil), "px.vizier.services.metadata.RecordExecutionResultResponse") + proto.RegisterType((*GetAllExecutionResultsRequest)(nil), "px.vizier.services.metadata.GetAllExecutionResultsRequest") + proto.RegisterType((*GetAllExecutionResultsResponse)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse") + proto.RegisterType((*GetAllExecutionResultsResponse_ExecutionResult)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse.ExecutionResult") } -func (m *ExecutionStats) GetBytesProcessed() int64 { - if m != nil { - return m.BytesProcessed - } - return 0 +func init() { + proto.RegisterFile("src/vizier/services/metadata/metadatapb/service.proto", fileDescriptor_bfe4468195647430) } -func (m *ExecutionStats) GetRecordsProcessed() int64 { - if m != nil { - return m.RecordsProcessed - } - return 0 +var fileDescriptor_bfe4468195647430 = []byte{ + // 2017 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x23, 0x57, + 0x15, 0xf7, 0x78, 0xf2, 0xe1, 0x9c, 0x64, 0xf3, 0x71, 0xe3, 0x6c, 0x1d, 0x2f, 0x75, 0xb6, 0x23, + 0xa0, 0xab, 0x4d, 0x77, 0xa6, 0x6b, 0xba, 0x4d, 0xd9, 0x96, 0xaa, 0x9b, 0xb8, 0x4d, 0xac, 0x6c, + 0xdb, 0x30, 0xce, 0x06, 0x89, 0x17, 0x6b, 0x3c, 0x73, 0xe3, 0x1d, 0xea, 0xf9, 0x60, 0xe6, 0x3a, + 0x24, 0x08, 0x09, 0x84, 0xc4, 0x1b, 0xaa, 0xe8, 0x03, 0x48, 0x7d, 0x03, 0xf1, 0x02, 0xcf, 0xfc, + 0x01, 0x08, 0x9e, 0x78, 0xdc, 0x27, 0x54, 0x21, 0xb4, 0x22, 0xde, 0x17, 0x9e, 0x50, 0xff, 0x04, + 0x74, 0xbf, 0xec, 0xb1, 0x3d, 0xb6, 0xe3, 0x80, 0x78, 0xe2, 0x29, 0x77, 0xce, 0x9c, 0xf3, 0xbb, + 0xe7, 0xfe, 0xce, 0xb9, 0xf7, 0xfe, 0xc6, 0x81, 0x07, 0x71, 0x64, 0x1b, 0x67, 0xee, 0x0f, 0x5d, + 0x1c, 0x19, 0x31, 0x8e, 0xce, 0x5c, 0x1b, 0xc7, 0x86, 0x87, 0x89, 0xe5, 0x58, 0xc4, 0xea, 0x0e, + 0xc2, 0x86, 0x7c, 0xa9, 
0x87, 0x51, 0x40, 0x02, 0x74, 0x2b, 0x3c, 0xd7, 0x79, 0x94, 0x2e, 0xa3, + 0x74, 0xe9, 0x5c, 0xcc, 0x37, 0x83, 0x66, 0xc0, 0xfc, 0x0c, 0x3a, 0xe2, 0x21, 0xc5, 0x52, 0x33, + 0x08, 0x9a, 0x2d, 0x6c, 0xb0, 0xa7, 0x46, 0xfb, 0xd4, 0x70, 0xda, 0x91, 0x45, 0xdc, 0xc0, 0x17, + 0xef, 0xb7, 0x06, 0xdf, 0x13, 0xd7, 0xc3, 0x31, 0xb1, 0xbc, 0x50, 0x3a, 0xd0, 0x54, 0xad, 0xd0, + 0xe5, 0x1e, 0x46, 0xbb, 0xed, 0x3a, 0x61, 0x83, 0xfd, 0x11, 0x0e, 0x3b, 0xd4, 0xc1, 0xb6, 0x22, + 0x3f, 0x20, 0x46, 0xd8, 0xb2, 0x7c, 0x1f, 0x47, 0x86, 0xe3, 0xc6, 0x24, 0x72, 0x1b, 0x6d, 0x82, + 0xa9, 0x73, 0xe2, 0xa9, 0x4e, 0x3d, 0x44, 0xe0, 0xb7, 0xd2, 0x02, 0x2f, 0x7c, 0xcb, 0x73, 0xed, + 0x3a, 0x89, 0x2c, 0xdb, 0xf5, 0x9b, 0x86, 0x1b, 0x19, 0xad, 0xa0, 0xe9, 0xda, 0x56, 0x2b, 0x6c, + 0xc8, 0x91, 0x08, 0xff, 0x1a, 0x0b, 0x0f, 0x3c, 0x2f, 0xf0, 0x8d, 0x86, 0x15, 0x63, 0x23, 0x26, + 0x16, 0x69, 0xc7, 0x94, 0x34, 0x36, 0x48, 0xba, 0x11, 0xab, 0xd1, 0xc2, 0xf5, 0x98, 0x04, 0x11, + 0x36, 0x62, 0xfb, 0x29, 0xf6, 0x18, 0xb7, 0x6c, 0x20, 0xdc, 0xee, 0x25, 0x2a, 0xe2, 0xe1, 0x38, + 0xb6, 0x9a, 0xac, 0x22, 0x7c, 0x10, 0x36, 0xba, 0x43, 0xe1, 0xae, 0xa7, 0x15, 0x30, 0x7e, 0x6a, + 0x45, 0xd8, 0x31, 0xac, 0x26, 0xf6, 0x49, 0xd8, 0xe0, 0x7f, 0x85, 0xff, 0x6d, 0xea, 0x2f, 0xde, + 0xdb, 0x67, 0x5e, 0xdc, 0xa4, 0x98, 0x7c, 0xc0, 0x3d, 0xb4, 0x15, 0xb8, 0x51, 0x63, 0x09, 0x99, + 0xf8, 0xfb, 0x6d, 0x1c, 0x13, 0xad, 0x0a, 0xcb, 0xd2, 0x10, 0x87, 0x81, 0x1f, 0x63, 0xb4, 0x03, + 0x73, 0x3c, 0xe7, 0x42, 0xf6, 0xb6, 0x72, 0x67, 0xb1, 0xbc, 0xa5, 0x87, 0xe7, 0x7a, 0x62, 0x69, + 0xba, 0x5c, 0x9a, 0x2e, 0x02, 0x85, 0xbb, 0x86, 0x60, 0xf5, 0x11, 0x4d, 0xa6, 0xea, 0x9f, 0x06, + 0x12, 0xbe, 0x06, 0x6b, 0x09, 0x9b, 0x98, 0xe1, 0x5d, 0x98, 0x71, 0xfd, 0xd3, 0xa0, 0xa0, 0xdc, + 0x56, 0xef, 0x2c, 0x96, 0xef, 0xea, 0x63, 0xfa, 0x4d, 0x67, 0xd1, 0x1f, 0x8a, 0x27, 0x93, 0xc5, + 0x69, 0x97, 0x0a, 0xdc, 0xe8, 0xb3, 0xa3, 0x77, 0x60, 0x96, 0xf1, 0x50, 0x50, 0x58, 0xca, 0x5f, + 0x4f, 0x83, 0xe4, 0xbc, 0xe8, 0x9c, 0x2f, 0x16, 0x6e, 0xf2, 
0x20, 0x54, 0x81, 0x39, 0x5e, 0x4c, + 0xb1, 0xe2, 0xd7, 0xae, 0x16, 0x5e, 0x63, 0x31, 0xa6, 0x88, 0x45, 0x8f, 0x61, 0x91, 0xb7, 0x59, + 0x9d, 0x2d, 0x4e, 0x65, 0x50, 0xdb, 0x14, 0x8a, 0x9b, 0x75, 0xd1, 0x7d, 0x7a, 0x5f, 0xdb, 0xea, + 0x7b, 0xec, 0x25, 0xe3, 0x07, 0xec, 0xee, 0x58, 0xfb, 0x5c, 0x81, 0x75, 0x36, 0xcb, 0x93, 0xd0, + 0xb1, 0x08, 0x8e, 0x05, 0xa1, 0xa8, 0x0a, 0xeb, 0x9e, 0x75, 0x5e, 0x6f, 0x33, 0x6b, 0xdd, 0xf5, + 0x09, 0x8e, 0xce, 0xac, 0x96, 0x58, 0xf7, 0xa6, 0xce, 0xf7, 0x99, 0x2e, 0xf7, 0x99, 0x5e, 0x11, + 0xfb, 0xd0, 0x5c, 0xf3, 0xac, 0x73, 0x0e, 0x55, 0x15, 0x31, 0x68, 0x07, 0x0a, 0x3d, 0xa8, 0xb8, + 0x1e, 0xe2, 0xa8, 0x1e, 0x89, 0x12, 0x31, 0x22, 0x66, 0xcd, 0x8d, 0x6e, 0x50, 0x7c, 0x84, 0x23, + 0x59, 0x3f, 0xed, 0x5f, 0x0a, 0x2c, 0x26, 0x72, 0x43, 0x3b, 0x90, 0x63, 0xb4, 0xd4, 0x5d, 0x47, + 0x24, 0xb2, 0x42, 0x97, 0xcd, 0x37, 0xb1, 0xfe, 0xe4, 0x49, 0xb5, 0xb2, 0xbb, 0xd8, 0x79, 0xbe, + 0x35, 0xcf, 0x3b, 0xa1, 0x62, 0xce, 0x33, 0xef, 0xaa, 0x83, 0x8a, 0x30, 0xef, 0xe0, 0x16, 0x26, + 0xd8, 0x61, 0x13, 0xe6, 0x0e, 0x32, 0xa6, 0x34, 0xa0, 0x77, 0x65, 0x49, 0xd5, 0x69, 0x4a, 0x7a, + 0x90, 0x91, 0x45, 0x7d, 0x0f, 0x16, 0x68, 0x6b, 0xf0, 0x62, 0xcc, 0x30, 0x8c, 0x57, 0x12, 0x18, + 0xdd, 0x9d, 0xc6, 0xc2, 0x2a, 0x16, 0xb1, 0x28, 0xed, 0x07, 0x19, 0x33, 0xe7, 0x88, 0xf1, 0x6e, + 0x0e, 0xe6, 0x38, 0x37, 0xda, 0x67, 0x59, 0xc8, 0xf7, 0x17, 0x43, 0x74, 0xf2, 0x87, 0x70, 0x83, + 0xaf, 0x5c, 0x90, 0x28, 0x5a, 0xfa, 0xce, 0xe4, 0x96, 0xe6, 0x48, 0xe6, 0x92, 0x95, 0x80, 0x45, + 0x47, 0x12, 0x8e, 0xef, 0x28, 0xda, 0x8f, 0xea, 0x95, 0x9a, 0x88, 0xef, 0x44, 0xd6, 0x44, 0x1c, + 0x91, 0x1b, 0x62, 0x54, 0x86, 0x8d, 0x3e, 0x44, 0x91, 0xa8, 0xc3, 0x58, 0xcd, 0x99, 0xeb, 0x49, + 0x67, 0x9e, 0x85, 0x83, 0xbe, 0x0a, 0xcb, 0xd8, 0x77, 0xea, 0xc1, 0x69, 0xfd, 0x0c, 0x47, 0xb1, + 0x1b, 0xf8, 0x8c, 0xbe, 0x9c, 0xb9, 0x84, 0x7d, 0xe7, 0xe3, 0xd3, 0x13, 0x6e, 0xd3, 0x2a, 0x90, + 0xff, 0x8e, 0x4b, 0x9e, 0x1e, 0x45, 0xf8, 0xd4, 0x3d, 0x3f, 0xc4, 0x17, 0xb2, 0x41, 0x6f, 0xc2, + 
0x5c, 0xc8, 0x6c, 0xac, 0x15, 0x16, 0x4c, 0xf1, 0x84, 0xf2, 0x30, 0xcb, 0xba, 0x92, 0x55, 0x7a, + 0xc1, 0xe4, 0x0f, 0xda, 0xa7, 0x0a, 0x6c, 0x0c, 0xc0, 0x08, 0x6a, 0xf7, 0x41, 0xfd, 0xe4, 0x4c, + 0x12, 0xfa, 0x60, 0x2c, 0xa1, 0xa9, 0x00, 0xfa, 0xe1, 0x89, 0x49, 0x11, 0x8a, 0xaf, 0x41, 0xf6, + 0xf0, 0x04, 0xad, 0x82, 0xfa, 0x09, 0xbe, 0x10, 0x39, 0xd1, 0x21, 0x4d, 0xe8, 0xcc, 0x6a, 0xb5, + 0x79, 0xaf, 0x2f, 0x99, 0xfc, 0x41, 0xfb, 0x5b, 0x16, 0x36, 0x4d, 0xdc, 0x74, 0x63, 0x82, 0xa3, + 0xe3, 0xc8, 0xb2, 0x71, 0x18, 0xb8, 0x3e, 0x91, 0x8b, 0x73, 0x20, 0x17, 0xf1, 0xa1, 0xcc, 0xec, + 0x60, 0x6c, 0x66, 0x23, 0x91, 0xf4, 0x21, 0x8b, 0xd9, 0x45, 0x2e, 0xfe, 0x55, 0x81, 0xb5, 0xe1, + 0xb9, 0x7f, 0x00, 0x1b, 0xa4, 0x6b, 0xac, 0x3b, 0x38, 0x6c, 0x05, 0x17, 0x5e, 0xef, 0xcc, 0xdb, + 0x4d, 0x6b, 0x92, 0xfe, 0x7b, 0x4e, 0x77, 0x23, 0x5d, 0xde, 0x6e, 0x3d, 0xfc, 0x4a, 0x17, 0xc9, + 0xcc, 0x93, 0x14, 0x2b, 0x42, 0x30, 0xe3, 0x5b, 0x1e, 0x16, 0x85, 0x63, 0x63, 0xf4, 0x06, 0xa8, + 0x84, 0xb4, 0xc4, 0xde, 0x1c, 0x7d, 0xec, 0xec, 0xce, 0x77, 0x9e, 0x6f, 0xa9, 0xc7, 0xc7, 0x8f, + 0x4d, 0xea, 0xae, 0xfd, 0x21, 0x0b, 0xc5, 0x34, 0x4a, 0x44, 0xc9, 0xbf, 0x07, 0x8b, 0xbd, 0x04, + 0xae, 0x4f, 0xb0, 0xa8, 0x7f, 0xcf, 0x24, 0x0e, 0xea, 0x24, 0x38, 0xda, 0x1e, 0x38, 0xf3, 0xd7, + 0xe9, 0x34, 0xf2, 0x6e, 0xd7, 0xfb, 0x8f, 0xf6, 0xe2, 0x8f, 0x60, 0x75, 0x10, 0x2d, 0x01, 0xa0, + 0x4c, 0x04, 0x40, 0xaf, 0x42, 0xd6, 0x75, 0xc4, 0x4c, 0x43, 0x67, 0xe3, 0x5c, 0xe7, 0xf9, 0x56, + 0xb6, 0x5a, 0x31, 0xb3, 0xae, 0xd3, 0xe5, 0x5a, 0xed, 0x71, 0xad, 0x7d, 0x00, 0x85, 0x7d, 0x4c, + 0x7a, 0x09, 0x24, 0xee, 0x57, 0x74, 0x17, 0x54, 0xd7, 0x91, 0x54, 0x0d, 0x21, 0x33, 0xf6, 0xab, + 0x95, 0xd8, 0xa4, 0x4e, 0xda, 0x6f, 0x54, 0xd8, 0x4c, 0x01, 0x12, 0xe4, 0x3f, 0x4d, 0x23, 0xff, + 0x83, 0xb1, 0xe4, 0x8f, 0x04, 0x1b, 0xe0, 0x1e, 0xf7, 0x51, 0x5f, 0xfc, 0x3c, 0x0b, 0x2b, 0x03, + 0x0e, 0x82, 0x20, 0x65, 0x32, 0x41, 0xf7, 0x61, 0x96, 0x72, 0xca, 0xbb, 0x71, 0xb9, 0x7c, 0xab, + 0x8f, 0xf5, 0xc7, 0xee, 0x29, 0xde, 
0xbb, 0xb0, 0x5b, 0x98, 0xcf, 0xca, 0x3d, 0x91, 0x01, 0x39, + 0xee, 0x81, 0xe3, 0x82, 0xca, 0x96, 0x95, 0x5a, 0xab, 0xae, 0x53, 0xb7, 0x08, 0x33, 0x89, 0x86, + 0xdf, 0x85, 0x65, 0x7c, 0x1e, 0x62, 0x9b, 0xaa, 0x4b, 0x9e, 0xc0, 0xec, 0xe4, 0x04, 0x6e, 0xc8, + 0x10, 0xbe, 0xc8, 0x57, 0x60, 0x89, 0x1f, 0xc3, 0x75, 0x0a, 0x19, 0x17, 0xe6, 0x6e, 0xab, 0x77, + 0x16, 0xcc, 0x45, 0x6e, 0xfb, 0x88, 0x9a, 0x34, 0x03, 0x5e, 0x32, 0xb1, 0x17, 0x9c, 0xe1, 0xe1, + 0xfd, 0x9f, 0x87, 0x59, 0x1e, 0xa6, 0xb0, 0x30, 0xfe, 0xa0, 0xed, 0x43, 0x61, 0x38, 0x40, 0x94, + 0x74, 0x9a, 0x16, 0xd5, 0x6c, 0x58, 0xe7, 0x17, 0xc0, 0x5e, 0xe0, 0x9f, 0xba, 0x4d, 0x39, 0xeb, + 0x84, 0x73, 0x73, 0x41, 0x9c, 0x9b, 0xf4, 0xd2, 0xe0, 0x17, 0x4d, 0x18, 0x38, 0xf5, 0x44, 0x0b, + 0xf3, 0xeb, 0xe8, 0x28, 0x70, 0xe8, 0xfa, 0xb4, 0x3d, 0xc8, 0xf7, 0x4f, 0x72, 0x9d, 0x4c, 0xd7, + 0x61, 0x6d, 0x1f, 0x93, 0x9a, 0x1d, 0xb9, 0x21, 0x91, 0xba, 0x48, 0xfb, 0x93, 0x02, 0x28, 0x69, + 0x15, 0xc0, 0x27, 0x30, 0x1f, 0x73, 0x93, 0xe8, 0xe8, 0x77, 0x26, 0x75, 0xf4, 0x00, 0x82, 0x2e, + 0x9e, 0xdf, 0xf7, 0x49, 0x74, 0x61, 0x4a, 0xb0, 0x62, 0x0d, 0x96, 0x92, 0x2f, 0x52, 0x68, 0xba, + 0x97, 0xa4, 0x69, 0xb1, 0xfc, 0x12, 0x3b, 0x9e, 0x85, 0x26, 0xd7, 0xf7, 0xa2, 0xc0, 0xe7, 0xf1, + 0x82, 0xbf, 0x87, 0xd9, 0xb7, 0x14, 0xed, 0x10, 0x0a, 0x8f, 0x1c, 0xe7, 0xe3, 0x88, 0x53, 0x24, + 0xde, 0x8b, 0x3a, 0x18, 0x54, 0x95, 0x53, 0x83, 0x60, 0x68, 0x24, 0x9e, 0x70, 0xd3, 0x6e, 0xc1, + 0x66, 0x0a, 0x98, 0x50, 0x70, 0xdf, 0x86, 0xf5, 0x0a, 0xd3, 0x59, 0xfd, 0x93, 0x3c, 0x84, 0x05, + 0x1e, 0x3d, 0x46, 0xc9, 0x2d, 0x75, 0x9e, 0x6f, 0xe5, 0x78, 0x58, 0xb5, 0x62, 0xe6, 0xb8, 0x7f, + 0xd5, 0xd1, 0x6e, 0x42, 0xbe, 0x1f, 0x52, 0x4c, 0xf5, 0x47, 0x05, 0xd6, 0x6a, 0x83, 0xe5, 0x42, + 0x4f, 0x06, 0xeb, 0xf2, 0xf6, 0xd8, 0xba, 0x0c, 0x01, 0xfc, 0x2f, 0xcb, 0x92, 0x07, 0x54, 0x1b, + 0xea, 0x0b, 0xed, 0xcf, 0x0a, 0x2c, 0xbf, 0x7f, 0x8e, 0xed, 0x36, 0xbd, 0xe7, 0x68, 0x87, 0xc6, + 0xe8, 0x2e, 0xac, 0x61, 0x69, 0xa9, 0xd3, 0x2f, 0xdc, 0xba, 0xcf, 0x1b, 
0x5a, 0x35, 0x57, 0xba, + 0x2f, 0x8e, 0x5d, 0x0f, 0x7f, 0x14, 0x23, 0x1d, 0xd6, 0xed, 0xc0, 0x0b, 0xdd, 0x96, 0xd5, 0xe7, + 0x9d, 0x65, 0xde, 0x6b, 0x89, 0x57, 0xc2, 0xff, 0x55, 0x58, 0x69, 0x5c, 0x30, 0x99, 0x1e, 0x05, + 0x36, 0x8e, 0x63, 0x21, 0xe1, 0x54, 0x73, 0x99, 0x99, 0x8f, 0xa4, 0x15, 0x6d, 0xc3, 0x5a, 0x84, + 0xed, 0x20, 0x72, 0x92, 0xae, 0x33, 0xcc, 0x75, 0x55, 0xbc, 0xe8, 0x3a, 0x6b, 0xbf, 0xcd, 0xc2, + 0x57, 0x4c, 0x66, 0xec, 0x2e, 0xc5, 0xc4, 0x71, 0xbb, 0xf5, 0xdf, 0xe8, 0x08, 0xf4, 0x16, 0x2c, + 0x74, 0x3f, 0xf3, 0x05, 0xdd, 0xc5, 0x21, 0xa5, 0x70, 0x2c, 0x3d, 0xcc, 0x9e, 0x33, 0xda, 0x86, + 0x59, 0x1c, 0x45, 0x41, 0x24, 0xf4, 0x45, 0xda, 0x69, 0x40, 0x85, 0x3e, 0xf3, 0x41, 0x27, 0xd0, + 0x23, 0x97, 0x1d, 0xcd, 0xb1, 0x90, 0xfb, 0xdb, 0x63, 0x5b, 0xaa, 0xbf, 0x76, 0x07, 0x19, 0x73, + 0x19, 0xf7, 0x59, 0xa8, 0xfc, 0x8f, 0x18, 0x17, 0xda, 0x16, 0xbc, 0x3c, 0x82, 0x24, 0xd1, 0x0b, + 0x5b, 0xf0, 0xf2, 0x3e, 0x26, 0x8f, 0x5a, 0xad, 0x01, 0x87, 0xee, 0xe9, 0xf4, 0x6b, 0x15, 0x4a, + 0xa3, 0x3c, 0xc4, 0x49, 0x85, 0x61, 0x9e, 0x4f, 0x27, 0x77, 0xc4, 0xe1, 0xa4, 0x93, 0x6a, 0x0c, + 0x9a, 0x3e, 0x98, 0xa9, 0xc4, 0x2e, 0xfe, 0x2a, 0x0b, 0x2b, 0x03, 0x2f, 0xff, 0x5f, 0xe4, 0x76, + 0x8b, 0x94, 0xff, 0xae, 0xc2, 0x8a, 0xfc, 0x3d, 0xa1, 0xc6, 0x81, 0xd0, 0x39, 0xac, 0x50, 0x9e, + 0x93, 0x9f, 0x68, 0xaf, 0x5f, 0xf5, 0xd3, 0x4e, 0xd6, 0xbe, 0x78, 0x7f, 0x8a, 0x08, 0x5e, 0xbd, + 0xd7, 0x15, 0x84, 0x01, 0xd8, 0x5d, 0xc4, 0xbf, 0xe2, 0xc6, 0xff, 0x44, 0xd2, 0xf7, 0x83, 0x4e, + 0x71, 0xfb, 0x4a, 0xbe, 0xa2, 0xe9, 0x3c, 0x58, 0x92, 0x0b, 0xa4, 0xfa, 0x0d, 0xdd, 0x9b, 0x9c, + 0x6b, 0x42, 0x7d, 0x16, 0xf5, 0xab, 0xba, 0x8b, 0xe9, 0x2e, 0x60, 0x75, 0x1f, 0x93, 0xbe, 0xcf, + 0x35, 0x74, 0x7f, 0x9a, 0x4f, 0x3b, 0x3e, 0x6d, 0x79, 0xfa, 0xaf, 0xc1, 0xf2, 0xef, 0x55, 0xd8, + 0x94, 0xe5, 0x4d, 0x88, 0x4f, 0x51, 0xe8, 0x9f, 0x29, 0x80, 0x86, 0x3f, 0x25, 0xd0, 0x9b, 0xd7, + 0xfb, 0xb8, 0x2b, 0xee, 0x5c, 0xf3, 0x9b, 0x05, 0xfd, 0x54, 0x61, 0xda, 0xa6, 0x5f, 0x55, 0xa3, + 0x07, 0xd3, 
0xaa, 0x70, 0x9e, 0xc5, 0x9b, 0xd7, 0x13, 0xef, 0xe8, 0xc7, 0xb0, 0x3a, 0x28, 0x29, + 0xd1, 0x1b, 0x13, 0x56, 0x94, 0x2a, 0x59, 0x8b, 0x0f, 0xa6, 0x8c, 0x12, 0xb5, 0xfa, 0xb9, 0x02, + 0x1b, 0xb2, 0x56, 0x5c, 0x28, 0xca, 0x3a, 0xc5, 0xb0, 0x94, 0xd4, 0x8f, 0x13, 0x76, 0x63, 0x8a, + 0x9e, 0x9d, 0xb0, 0x1b, 0xd3, 0xc4, 0x69, 0xf9, 0x97, 0x73, 0x70, 0xb3, 0xa7, 0x0c, 0x6a, 0x24, + 0x88, 0xb0, 0xcc, 0xc7, 0x13, 0xdb, 0x94, 0x49, 0x03, 0xa4, 0x5f, 0x59, 0x5b, 0xf2, 0x5c, 0x8c, + 0x29, 0xb5, 0x28, 0x6b, 0x8f, 0x21, 0x51, 0x37, 0xa1, 0x3d, 0x46, 0x29, 0xca, 0x09, 0xed, 0x31, + 0x52, 0x3b, 0xd2, 0x1a, 0x24, 0x85, 0xde, 0x84, 0x1a, 0xa4, 0xc8, 0xcc, 0x09, 0x35, 0x48, 0x53, + 0x91, 0x94, 0xe8, 0xda, 0x55, 0x89, 0xae, 0x4d, 0x49, 0xf4, 0xb0, 0xb8, 0x43, 0x9f, 0x2a, 0xb0, + 0x91, 0x7a, 0xe5, 0xa3, 0x6f, 0x4e, 0x68, 0xe9, 0xd1, 0x5a, 0xaa, 0xf8, 0xf0, 0x3a, 0xa1, 0x22, + 0xa1, 0xcf, 0x14, 0xb8, 0x99, 0x7e, 0xe5, 0xa3, 0x87, 0xd7, 0xd2, 0x09, 0x3c, 0xa5, 0xb7, 0xff, + 0x03, 0x8d, 0xb1, 0xfb, 0xde, 0xb3, 0xcb, 0x52, 0xe6, 0x8b, 0xcb, 0x52, 0xe6, 0xcb, 0xcb, 0x92, + 0xf2, 0x93, 0x4e, 0x49, 0xf9, 0x5d, 0xa7, 0xa4, 0xfc, 0xa5, 0x53, 0x52, 0x9e, 0x75, 0x4a, 0xca, + 0x3f, 0x3a, 0x25, 0xe5, 0x9f, 0x9d, 0x52, 0xe6, 0xcb, 0x4e, 0x49, 0xf9, 0xc5, 0x8b, 0x52, 0xe6, + 0xd9, 0x8b, 0x52, 0xe6, 0x8b, 0x17, 0xa5, 0xcc, 0x77, 0xa1, 0xf7, 0x7f, 0xa7, 0xc6, 0x1c, 0x53, + 0x08, 0xdf, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x26, 0x6f, 0xf1, 0xa9, 0x1a, 0x00, + 0x00, } -type RecordExecutionResultRequest struct { - ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` - Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Types that are valid to be assigned to Result: - // *RecordExecutionResultRequest_Error - // *RecordExecutionResultRequest_ExecutionStats - Result isRecordExecutionResultRequest_Result `protobuf_oneof:"result"` -} +func (this *SchemaRequest) Equal(that interface{}) 
bool { + if that == nil { + return this == nil + } -func (m *RecordExecutionResultRequest) Reset() { *m = RecordExecutionResultRequest{} } -func (*RecordExecutionResultRequest) ProtoMessage() {} -func (*RecordExecutionResultRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{33} -} -func (m *RecordExecutionResultRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RecordExecutionResultRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RecordExecutionResultRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + that1, ok := that.(*SchemaRequest) + if !ok { + that2, ok := that.(SchemaRequest) + if ok { + that1 = &that2 + } else { + return false } - return b[:n], nil } -} -func (m *RecordExecutionResultRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RecordExecutionResultRequest.Merge(m, src) -} -func (m *RecordExecutionResultRequest) XXX_Size() int { - return m.Size() -} -func (m *RecordExecutionResultRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RecordExecutionResultRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RecordExecutionResultRequest proto.InternalMessageInfo - -type isRecordExecutionResultRequest_Result interface { - isRecordExecutionResultRequest_Result() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} - -type RecordExecutionResultRequest_Error struct { - Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` -} -type RecordExecutionResultRequest_ExecutionStats struct { - ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` -} - -func (*RecordExecutionResultRequest_Error) isRecordExecutionResultRequest_Result() {} -func (*RecordExecutionResultRequest_ExecutionStats) 
isRecordExecutionResultRequest_Result() {} - -func (m *RecordExecutionResultRequest) GetResult() isRecordExecutionResultRequest_Result { - if m != nil { - return m.Result + if that1 == nil { + return this == nil + } else if this == nil { + return false } - return nil + return true } - -func (m *RecordExecutionResultRequest) GetScriptID() *uuidpb.UUID { - if m != nil { - return m.ScriptID +func (this *SchemaResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - return nil -} -func (m *RecordExecutionResultRequest) GetTimestamp() *types.Timestamp { - if m != nil { - return m.Timestamp + that1, ok := that.(*SchemaResponse) + if !ok { + that2, ok := that.(SchemaResponse) + if ok { + that1 = &that2 + } else { + return false + } } - return nil -} - -func (m *RecordExecutionResultRequest) GetError() *statuspb.Status { - if x, ok := m.GetResult().(*RecordExecutionResultRequest_Error); ok { - return x.Error + if that1 == nil { + return this == nil + } else if this == nil { + return false } - return nil -} - -func (m *RecordExecutionResultRequest) GetExecutionStats() *ExecutionStats { - if x, ok := m.GetResult().(*RecordExecutionResultRequest_ExecutionStats); ok { - return x.ExecutionStats + if !this.Schema.Equal(that1.Schema) { + return false } - return nil + return true } - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*RecordExecutionResultRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*RecordExecutionResultRequest_Error)(nil), - (*RecordExecutionResultRequest_ExecutionStats)(nil), +func (this *AgentInfoRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } -} - -type RecordExecutionResultResponse struct { -} -func (m *RecordExecutionResultResponse) Reset() { *m = RecordExecutionResultResponse{} } -func (*RecordExecutionResultResponse) ProtoMessage() {} -func (*RecordExecutionResultResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{34} -} -func (m *RecordExecutionResultResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RecordExecutionResultResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RecordExecutionResultResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + that1, ok := that.(*AgentInfoRequest) + if !ok { + that2, ok := that.(AgentInfoRequest) + if ok { + that1 = &that2 + } else { + return false } - return b[:n], nil } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true } -func (m *RecordExecutionResultResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RecordExecutionResultResponse.Merge(m, src) -} -func (m *RecordExecutionResultResponse) XXX_Size() int { - return m.Size() -} -func (m *RecordExecutionResultResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RecordExecutionResultResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RecordExecutionResultResponse proto.InternalMessageInfo - -type GetAllExecutionResultsRequest struct { -} +func (this *AgentInfoResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } -func (m *GetAllExecutionResultsRequest) Reset() { *m = GetAllExecutionResultsRequest{} } -func 
(*GetAllExecutionResultsRequest) ProtoMessage() {} -func (*GetAllExecutionResultsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{35} -} -func (m *GetAllExecutionResultsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllExecutionResultsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllExecutionResultsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + that1, ok := that.(*AgentInfoResponse) + if !ok { + that2, ok := that.(AgentInfoResponse) + if ok { + that1 = &that2 + } else { + return false } - return b[:n], nil } -} -func (m *GetAllExecutionResultsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllExecutionResultsRequest.Merge(m, src) -} -func (m *GetAllExecutionResultsRequest) XXX_Size() int { - return m.Size() -} -func (m *GetAllExecutionResultsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllExecutionResultsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAllExecutionResultsRequest proto.InternalMessageInfo - -type GetAllExecutionResultsResponse struct { - Results []*GetAllExecutionResultsResponse_ExecutionResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` -} - -func (m *GetAllExecutionResultsResponse) Reset() { *m = GetAllExecutionResultsResponse{} } -func (*GetAllExecutionResultsResponse) ProtoMessage() {} -func (*GetAllExecutionResultsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{36} -} -func (m *GetAllExecutionResultsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllExecutionResultsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllExecutionResultsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Info) != len(that1.Info) { + return false + } + for i := range this.Info { + if !this.Info[i].Equal(that1.Info[i]) { + return false } - return b[:n], nil } + return true } -func (m *GetAllExecutionResultsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllExecutionResultsResponse.Merge(m, src) -} -func (m *GetAllExecutionResultsResponse) XXX_Size() int { - return m.Size() -} -func (m *GetAllExecutionResultsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllExecutionResultsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAllExecutionResultsResponse proto.InternalMessageInfo - -func (m *GetAllExecutionResultsResponse) GetResults() []*GetAllExecutionResultsResponse_ExecutionResult { - if m != nil { - return m.Results +func (this *AgentMetadata) Equal(that interface{}) bool { + if that == nil { + return this == nil } - return nil -} - -type GetAllExecutionResultsResponse_ExecutionResult struct { - ScriptID *uuidpb.UUID `protobuf:"bytes,1,opt,name=script_id,json=scriptId,proto3" json:"script_id,omitempty"` - Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Types that are valid to be assigned to Result: - // *GetAllExecutionResultsResponse_ExecutionResult_Error - // *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats - Result isGetAllExecutionResultsResponse_ExecutionResult_Result `protobuf_oneof:"result"` -} -func (m *GetAllExecutionResultsResponse_ExecutionResult) Reset() { - *m = GetAllExecutionResultsResponse_ExecutionResult{} -} -func (*GetAllExecutionResultsResponse_ExecutionResult) ProtoMessage() {} -func (*GetAllExecutionResultsResponse_ExecutionResult) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe4468195647430, []int{36, 0} -} -func (m *GetAllExecutionResultsResponse_ExecutionResult) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + that1, ok := that.(*AgentMetadata) + if !ok { + that2, ok := that.(AgentMetadata) + if ok { + that1 = &that2 + } else { + return false } - return b[:n], nil } -} -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.Merge(m, src) -} -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_Size() int { - return m.Size() -} -func (m *GetAllExecutionResultsResponse_ExecutionResult) XXX_DiscardUnknown() { - xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAllExecutionResultsResponse_ExecutionResult proto.InternalMessageInfo - -type isGetAllExecutionResultsResponse_ExecutionResult_Result interface { - isGetAllExecutionResultsResponse_ExecutionResult_Result() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} - -type GetAllExecutionResultsResponse_ExecutionResult_Error struct { - Error *statuspb.Status `protobuf:"bytes,3,opt,name=error,proto3,oneof" json:"error,omitempty"` -} -type GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats struct { - ExecutionStats *ExecutionStats `protobuf:"bytes,4,opt,name=execution_stats,json=executionStats,proto3,oneof" json:"execution_stats,omitempty"` -} - -func (*GetAllExecutionResultsResponse_ExecutionResult_Error) isGetAllExecutionResultsResponse_ExecutionResult_Result() { -} -func (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) isGetAllExecutionResultsResponse_ExecutionResult_Result() { -} - -func (m 
*GetAllExecutionResultsResponse_ExecutionResult) GetResult() isGetAllExecutionResultsResponse_ExecutionResult_Result { - if m != nil { - return m.Result + if that1 == nil { + return this == nil + } else if this == nil { + return false } - return nil -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetScriptID() *uuidpb.UUID { - if m != nil { - return m.ScriptID + if !this.Agent.Equal(that1.Agent) { + return false } - return nil -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetTimestamp() *types.Timestamp { - if m != nil { - return m.Timestamp + if !this.Status.Equal(that1.Status) { + return false } - return nil -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetError() *statuspb.Status { - if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_Error); ok { - return x.Error + if !this.CarnotInfo.Equal(that1.CarnotInfo) { + return false } - return nil + return true } - -func (m *GetAllExecutionResultsResponse_ExecutionResult) GetExecutionStats() *ExecutionStats { - if x, ok := m.GetResult().(*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats); ok { - return x.ExecutionStats +func (this *AgentUpdatesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - return nil -} -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*GetAllExecutionResultsResponse_ExecutionResult) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*GetAllExecutionResultsResponse_ExecutionResult_Error)(nil), - (*GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats)(nil), - } -} - -func init() { - proto.RegisterType((*SchemaRequest)(nil), "px.vizier.services.metadata.SchemaRequest") - proto.RegisterType((*SchemaResponse)(nil), "px.vizier.services.metadata.SchemaResponse") - proto.RegisterType((*AgentInfoRequest)(nil), "px.vizier.services.metadata.AgentInfoRequest") - proto.RegisterType((*AgentInfoResponse)(nil), "px.vizier.services.metadata.AgentInfoResponse") - proto.RegisterType((*AgentMetadata)(nil), "px.vizier.services.metadata.AgentMetadata") - proto.RegisterType((*AgentUpdatesRequest)(nil), "px.vizier.services.metadata.AgentUpdatesRequest") - proto.RegisterType((*AgentUpdate)(nil), "px.vizier.services.metadata.AgentUpdate") - proto.RegisterType((*AgentUpdatesResponse)(nil), "px.vizier.services.metadata.AgentUpdatesResponse") - proto.RegisterType((*WithPrefixKeyRequest)(nil), "px.vizier.services.metadata.WithPrefixKeyRequest") - proto.RegisterType((*WithPrefixKeyResponse)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse") - proto.RegisterType((*WithPrefixKeyResponse_KV)(nil), "px.vizier.services.metadata.WithPrefixKeyResponse.KV") - proto.RegisterType((*RegisterFileSourceRequest)(nil), "px.vizier.services.metadata.RegisterFileSourceRequest") - proto.RegisterType((*RegisterFileSourceResponse)(nil), "px.vizier.services.metadata.RegisterFileSourceResponse") - proto.RegisterType((*RegisterFileSourceResponse_FileSourceStatus)(nil), "px.vizier.services.metadata.RegisterFileSourceResponse.FileSourceStatus") - proto.RegisterType((*GetFileSourceInfoRequest)(nil), "px.vizier.services.metadata.GetFileSourceInfoRequest") - proto.RegisterType((*GetFileSourceInfoResponse)(nil), "px.vizier.services.metadata.GetFileSourceInfoResponse") - 
proto.RegisterType((*GetFileSourceInfoResponse_FileSourceState)(nil), "px.vizier.services.metadata.GetFileSourceInfoResponse.FileSourceState") - proto.RegisterType((*RemoveFileSourceRequest)(nil), "px.vizier.services.metadata.RemoveFileSourceRequest") - proto.RegisterType((*RemoveFileSourceResponse)(nil), "px.vizier.services.metadata.RemoveFileSourceResponse") - proto.RegisterType((*RegisterTracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest") - proto.RegisterType((*RegisterTracepointRequest_TracepointRequest)(nil), "px.vizier.services.metadata.RegisterTracepointRequest.TracepointRequest") - proto.RegisterType((*RegisterTracepointResponse)(nil), "px.vizier.services.metadata.RegisterTracepointResponse") - proto.RegisterType((*RegisterTracepointResponse_TracepointStatus)(nil), "px.vizier.services.metadata.RegisterTracepointResponse.TracepointStatus") - proto.RegisterType((*GetTracepointInfoRequest)(nil), "px.vizier.services.metadata.GetTracepointInfoRequest") - proto.RegisterType((*GetTracepointInfoResponse)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse") - proto.RegisterType((*GetTracepointInfoResponse_TracepointState)(nil), "px.vizier.services.metadata.GetTracepointInfoResponse.TracepointState") - proto.RegisterType((*RemoveTracepointRequest)(nil), "px.vizier.services.metadata.RemoveTracepointRequest") - proto.RegisterType((*RemoveTracepointResponse)(nil), "px.vizier.services.metadata.RemoveTracepointResponse") - proto.RegisterType((*UpdateConfigRequest)(nil), "px.vizier.services.metadata.UpdateConfigRequest") - proto.RegisterType((*UpdateConfigResponse)(nil), "px.vizier.services.metadata.UpdateConfigResponse") - proto.RegisterType((*GetScriptsRequest)(nil), "px.vizier.services.metadata.GetScriptsRequest") - proto.RegisterType((*GetScriptsResponse)(nil), "px.vizier.services.metadata.GetScriptsResponse") - proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), 
"px.vizier.services.metadata.GetScriptsResponse.ScriptsEntry") - proto.RegisterType((*AddOrUpdateScriptRequest)(nil), "px.vizier.services.metadata.AddOrUpdateScriptRequest") - proto.RegisterType((*AddOrUpdateScriptResponse)(nil), "px.vizier.services.metadata.AddOrUpdateScriptResponse") - proto.RegisterType((*DeleteScriptRequest)(nil), "px.vizier.services.metadata.DeleteScriptRequest") - proto.RegisterType((*DeleteScriptResponse)(nil), "px.vizier.services.metadata.DeleteScriptResponse") - proto.RegisterType((*SetScriptsRequest)(nil), "px.vizier.services.metadata.SetScriptsRequest") - proto.RegisterMapType((map[string]*cvmsgspb.CronScript)(nil), "px.vizier.services.metadata.SetScriptsRequest.ScriptsEntry") - proto.RegisterType((*SetScriptsResponse)(nil), "px.vizier.services.metadata.SetScriptsResponse") - proto.RegisterType((*ExecutionStats)(nil), "px.vizier.services.metadata.ExecutionStats") - proto.RegisterType((*RecordExecutionResultRequest)(nil), "px.vizier.services.metadata.RecordExecutionResultRequest") - proto.RegisterType((*RecordExecutionResultResponse)(nil), "px.vizier.services.metadata.RecordExecutionResultResponse") - proto.RegisterType((*GetAllExecutionResultsRequest)(nil), "px.vizier.services.metadata.GetAllExecutionResultsRequest") - proto.RegisterType((*GetAllExecutionResultsResponse)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse") - proto.RegisterType((*GetAllExecutionResultsResponse_ExecutionResult)(nil), "px.vizier.services.metadata.GetAllExecutionResultsResponse.ExecutionResult") -} - -func init() { - proto.RegisterFile("src/vizier/services/metadata/metadatapb/service.proto", fileDescriptor_bfe4468195647430) -} - -var fileDescriptor_bfe4468195647430 = []byte{ - // 2204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xe7, 0x92, 0xfa, 0xa0, 0x9e, 0x64, 0x7d, 0x8c, 0x24, 0x47, 0x62, 0x1a, 0xca, 0x59, 0xb4, - 0x8d, 0x61, 0xc5, 0xbb, 
0xb1, 0x1a, 0x59, 0xa9, 0x93, 0x06, 0xb1, 0xc4, 0x58, 0x22, 0xe4, 0x24, - 0xea, 0x52, 0x56, 0x81, 0x5e, 0x88, 0xe5, 0xee, 0x90, 0xde, 0x9a, 0xfb, 0xd1, 0xdd, 0xa5, 0x2a, - 0x15, 0x05, 0x5a, 0x14, 0xe8, 0xad, 0x08, 0x9a, 0x43, 0x0b, 0xe4, 0xd6, 0x8f, 0x4b, 0x7b, 0x6e, - 0xef, 0x45, 0x7b, 0xea, 0xd1, 0xa7, 0x22, 0x28, 0x0a, 0xa3, 0xa6, 0x2f, 0x3d, 0x15, 0xf9, 0x13, - 0x8a, 0xf9, 0x5a, 0xee, 0x92, 0x4b, 0x2e, 0xa9, 0x16, 0x39, 0xe5, 0xa4, 0xe1, 0x9b, 0xf7, 0xde, - 0xbc, 0xf9, 0xfd, 0xde, 0xbc, 0x7d, 0x33, 0x10, 0xec, 0x06, 0xbe, 0xa1, 0x9e, 0x5b, 0x3f, 0xb4, - 0xb0, 0xaf, 0x06, 0xd8, 0x3f, 0xb7, 0x0c, 0x1c, 0xa8, 0x36, 0x0e, 0x75, 0x53, 0x0f, 0xf5, 0x68, - 0xe0, 0x35, 0xc4, 0xa4, 0xe2, 0xf9, 0x6e, 0xe8, 0xa2, 0x97, 0xbd, 0x0b, 0x85, 0x59, 0x29, 0xc2, - 0x4a, 0x11, 0xca, 0xa5, 0xb5, 0x96, 0xdb, 0x72, 0xa9, 0x9e, 0x4a, 0x46, 0xcc, 0xa4, 0x54, 0x6e, - 0xb9, 0x6e, 0xab, 0x8d, 0x55, 0xfa, 0xab, 0xd1, 0x69, 0xaa, 0x66, 0xc7, 0xd7, 0x43, 0xcb, 0x75, - 0xf8, 0xfc, 0x56, 0xff, 0x7c, 0x68, 0xd9, 0x38, 0x08, 0x75, 0xdb, 0x13, 0x0a, 0x24, 0x54, 0xdd, - 0xb3, 0x98, 0x86, 0xda, 0xe9, 0x58, 0xa6, 0xd7, 0xa0, 0x7f, 0xb8, 0xc2, 0x1e, 0x51, 0x30, 0x74, - 0xdf, 0x71, 0x43, 0xd5, 0x6b, 0xeb, 0x8e, 0x83, 0x7d, 0xd5, 0xb4, 0x82, 0xd0, 0xb7, 0x1a, 0x9d, - 0x10, 0x13, 0xe5, 0xd8, 0xaf, 0x3a, 0xd1, 0xe0, 0x86, 0xdf, 0x4a, 0x33, 0xbc, 0x74, 0x74, 0xdb, - 0x32, 0xea, 0xa1, 0xaf, 0x1b, 0x96, 0xd3, 0x52, 0x2d, 0x5f, 0x6d, 0xbb, 0x2d, 0xcb, 0xd0, 0xdb, - 0x5e, 0x43, 0x8c, 0xb8, 0xb9, 0x9a, 0x62, 0xde, 0xb4, 0xda, 0xb8, 0x1e, 0xb8, 0x1d, 0xdf, 0xc0, - 0x31, 0x53, 0x6e, 0xf0, 0x35, 0x6a, 0xe0, 0xda, 0xb6, 0xeb, 0xa8, 0x0d, 0x3d, 0xc0, 0x6a, 0x10, - 0xea, 0x61, 0x27, 0x20, 0x28, 0xd3, 0x41, 0x5c, 0x2d, 0xd4, 0x1b, 0xc4, 0x53, 0xe8, 0xfa, 0x58, - 0x0d, 0x8c, 0xc7, 0xd8, 0xa6, 0x64, 0xd0, 0x01, 0x57, 0xbb, 0x1d, 0xa3, 0xd0, 0xc6, 0x41, 0xa0, - 0xb7, 0x28, 0x85, 0x6c, 0xe0, 0x35, 0xa2, 0x21, 0x57, 0x57, 0xd2, 0x18, 0x0f, 0x1e, 0xeb, 0x3e, - 0x36, 0x55, 0xbd, 0x85, 0x9d, 0xd0, 0x6b, 0xb0, 0xbf, 0x5c, 
0xff, 0x06, 0xd1, 0xe7, 0xf3, 0xc6, - 0xb9, 0x1d, 0xb4, 0x88, 0x4f, 0x36, 0x60, 0x1a, 0xf2, 0x12, 0x5c, 0xab, 0xd1, 0x80, 0x34, 0xfc, - 0xfd, 0x0e, 0x0e, 0x42, 0xb9, 0x0a, 0x8b, 0x42, 0x10, 0x78, 0xae, 0x13, 0x60, 0xb4, 0x07, 0x33, - 0x2c, 0xe6, 0x8d, 0xfc, 0x0d, 0xe9, 0xe6, 0xfc, 0xce, 0x96, 0xe2, 0x5d, 0x28, 0xb1, 0xad, 0x29, - 0x62, 0x6b, 0x0a, 0x37, 0xe4, 0xea, 0x32, 0x82, 0xe5, 0xfb, 0x24, 0x98, 0xaa, 0xd3, 0x74, 0x85, - 0xfb, 0x1a, 0xac, 0xc4, 0x64, 0x7c, 0x85, 0x77, 0x61, 0xca, 0x72, 0x9a, 0xee, 0x86, 0x74, 0xa3, - 0x70, 0x73, 0x7e, 0xe7, 0x96, 0x32, 0x22, 0x41, 0x15, 0x6a, 0xfd, 0x01, 0xff, 0xa5, 0x51, 0x3b, - 0xf9, 0xb9, 0x04, 0xd7, 0x12, 0x72, 0xf4, 0x0e, 0x4c, 0x53, 0x1c, 0x36, 0x24, 0x1a, 0xf2, 0xd7, - 0xd3, 0x5c, 0x32, 0x5c, 0x14, 0x86, 0x17, 0x35, 0xd7, 0x98, 0x11, 0xaa, 0xc0, 0x0c, 0x23, 0x93, - 0xef, 0xf8, 0xf5, 0xf1, 0xcc, 0x6b, 0xd4, 0x46, 0xe3, 0xb6, 0xe8, 0x21, 0xcc, 0xb3, 0xc4, 0xaa, - 0xd3, 0xcd, 0x15, 0xa8, 0xab, 0x6d, 0xe2, 0x8a, 0x89, 0x15, 0x9e, 0x6f, 0x4a, 0x22, 0xcf, 0x95, - 0x03, 0x3a, 0x49, 0xf1, 0x01, 0x23, 0x1a, 0xcb, 0x9f, 0x4a, 0xb0, 0x4a, 0x57, 0x79, 0xe4, 0x99, - 0x7a, 0x88, 0x03, 0x0e, 0x28, 0xaa, 0xc2, 0xaa, 0xad, 0x5f, 0xd4, 0x3b, 0x54, 0x5a, 0xb7, 0x9c, - 0x10, 0xfb, 0xe7, 0x7a, 0x9b, 0xef, 0x7b, 0x53, 0x61, 0x07, 0x53, 0x11, 0x07, 0x53, 0xa9, 0xf0, - 0x83, 0xab, 0xad, 0xd8, 0xfa, 0x05, 0x73, 0x55, 0xe5, 0x36, 0x68, 0x0f, 0x36, 0x7a, 0xae, 0x82, - 0xba, 0x87, 0xfd, 0xba, 0xcf, 0x29, 0xa2, 0x40, 0x4c, 0x6b, 0xeb, 0x91, 0x51, 0x70, 0x82, 0x7d, - 0xc1, 0x9f, 0xfc, 0x1f, 0x09, 0xe6, 0x63, 0xb1, 0xa1, 0x3d, 0x28, 0x52, 0x58, 0xea, 0x96, 0xc9, - 0x03, 0x59, 0x22, 0xdb, 0x66, 0xa7, 0x5e, 0x79, 0xf4, 0xa8, 0x5a, 0xd9, 0x9f, 0xef, 0x3e, 0xdb, - 0x9a, 0x65, 0x99, 0x50, 0xd1, 0x66, 0xa9, 0x76, 0xd5, 0x44, 0x25, 0x98, 0x35, 0x71, 0x1b, 0x87, - 0xd8, 0xa4, 0x0b, 0x16, 0x8f, 0x72, 0x9a, 0x10, 0xa0, 0x77, 0x05, 0xa5, 0x85, 0x49, 0x28, 0x3d, - 0xca, 0x09, 0x52, 0xdf, 0x83, 0x39, 0x92, 0x1a, 0x8c, 0x8c, 0x29, 0xea, 0xe3, 0xd5, 0x98, 0x8f, - 
0xe8, 0xa4, 0x51, 0xb3, 0x8a, 0x1e, 0xea, 0x04, 0xf6, 0xa3, 0x9c, 0x56, 0x34, 0xf9, 0x78, 0xbf, - 0x08, 0x33, 0x0c, 0x1b, 0xf9, 0x93, 0x3c, 0xac, 0x25, 0xc9, 0xe0, 0x99, 0xfc, 0x01, 0x5c, 0x63, - 0x3b, 0xe7, 0x20, 0xf2, 0x94, 0xbe, 0x99, 0x9d, 0xd2, 0xcc, 0x93, 0xb6, 0xa0, 0xc7, 0xdc, 0xa2, - 0x13, 0xe1, 0x8e, 0x9d, 0x28, 0x92, 0x8f, 0x85, 0xb1, 0x92, 0x88, 0x9d, 0x44, 0x9a, 0x44, 0xcc, - 0x23, 0x13, 0x04, 0x68, 0x07, 0xd6, 0x13, 0x1e, 0x79, 0xa0, 0x26, 0x45, 0xb5, 0xa8, 0xad, 0xc6, - 0x95, 0x59, 0x14, 0x26, 0xfa, 0x2a, 0x2c, 0x62, 0xc7, 0xac, 0xbb, 0xcd, 0xfa, 0x39, 0xf6, 0x03, - 0xcb, 0x75, 0x28, 0x7c, 0x45, 0x6d, 0x01, 0x3b, 0xe6, 0x47, 0xcd, 0x33, 0x26, 0x93, 0x2b, 0xb0, - 0xf6, 0x1d, 0x2b, 0x7c, 0x7c, 0xe2, 0xe3, 0xa6, 0x75, 0x71, 0x8c, 0x2f, 0x45, 0x82, 0x5e, 0x87, - 0x19, 0x8f, 0xca, 0x68, 0x2a, 0xcc, 0x69, 0xfc, 0x17, 0x5a, 0x83, 0x69, 0x9a, 0x95, 0x94, 0xe9, - 0x39, 0x8d, 0xfd, 0x90, 0x3f, 0x96, 0x60, 0xbd, 0xcf, 0x0d, 0x87, 0xf6, 0x10, 0x0a, 0x4f, 0xce, - 0x05, 0xa0, 0xbb, 0x23, 0x01, 0x4d, 0x75, 0xa0, 0x1c, 0x9f, 0x69, 0xc4, 0x43, 0xe9, 0x75, 0xc8, - 0x1f, 0x9f, 0xa1, 0x65, 0x28, 0x3c, 0xc1, 0x97, 0x3c, 0x26, 0x32, 0x24, 0x01, 0x9d, 0xeb, 0xed, - 0x0e, 0xcb, 0xf5, 0x05, 0x8d, 0xfd, 0x90, 0x5d, 0xd8, 0xd4, 0x70, 0xcb, 0x0a, 0x42, 0xec, 0x3f, - 0xb0, 0xda, 0xb8, 0x46, 0x3f, 0x0b, 0x62, 0x6f, 0x1a, 0x14, 0x7d, 0x36, 0x14, 0x81, 0xdd, 0x4d, - 0xa1, 0x26, 0xf6, 0x3d, 0x51, 0x2c, 0x5f, 0xe9, 0xb9, 0xa9, 0x60, 0xaf, 0xed, 0x5e, 0xda, 0xa4, - 0xf2, 0x44, 0x7e, 0xe4, 0x3f, 0xe5, 0xa1, 0x94, 0xb6, 0x22, 0x87, 0xe1, 0x09, 0x2c, 0xc4, 0xfc, - 0x89, 0x65, 0x8f, 0x46, 0xe2, 0x31, 0xdc, 0x5d, 0x2c, 0x18, 0x5e, 0xbd, 0xe6, 0x9b, 0x91, 0x24, - 0x40, 0xdb, 0x7d, 0x85, 0x70, 0x95, 0x2c, 0x23, 0x3e, 0x78, 0x4a, 0xb2, 0xde, 0x95, 0x7e, 0x04, - 0xcb, 0xfd, 0xde, 0x62, 0x0e, 0xa4, 0x4c, 0x07, 0xe8, 0x35, 0xc8, 0x5b, 0x26, 0x5f, 0x69, 0xa0, - 0x60, 0xcc, 0x74, 0x9f, 0x6d, 0xe5, 0xab, 0x15, 0x2d, 0x6f, 0x99, 0x08, 0xc1, 0x94, 0xa3, 0xdb, - 0x98, 0xe6, 0xec, 0x9c, 0x46, 0xc7, 
0xf2, 0x03, 0xd8, 0x38, 0xc4, 0x61, 0x2f, 0x80, 0xd8, 0x47, - 0x07, 0xdd, 0x82, 0x82, 0x65, 0x0a, 0xa8, 0x06, 0x3c, 0xcf, 0x76, 0x9f, 0x6d, 0x15, 0xaa, 0x95, - 0x40, 0x23, 0x4a, 0xf2, 0x6f, 0x0b, 0xb0, 0x99, 0xe2, 0x88, 0xa3, 0x6f, 0xa5, 0xa2, 0xff, 0x60, - 0x24, 0xfa, 0x43, 0xbd, 0xf5, 0x81, 0x8f, 0x13, 0xd8, 0x97, 0x3e, 0xcd, 0xc3, 0x52, 0x9f, 0x02, - 0x47, 0x48, 0xca, 0x46, 0xe8, 0x0e, 0x4c, 0x13, 0x50, 0x59, 0x2e, 0x2f, 0xee, 0xbc, 0x9c, 0x80, - 0xfd, 0xa1, 0xd5, 0xc4, 0x07, 0x97, 0x46, 0x9b, 0xaf, 0xca, 0x34, 0x91, 0x0a, 0x45, 0xa6, 0x81, - 0x83, 0x8d, 0x02, 0xdd, 0x56, 0x2a, 0x59, 0x91, 0x52, 0xc4, 0xc2, 0x54, 0x8f, 0x05, 0xb4, 0x0f, - 0x8b, 0xf8, 0xc2, 0xc3, 0x06, 0x69, 0xd2, 0x58, 0x00, 0xd3, 0xd9, 0x01, 0x5c, 0x13, 0x26, 0x6c, - 0x93, 0xaf, 0xc2, 0x02, 0x2b, 0x4e, 0x75, 0xe2, 0x32, 0xd8, 0x98, 0xb9, 0x51, 0xb8, 0x39, 0xa7, - 0xcd, 0x33, 0xd9, 0x87, 0x44, 0x24, 0xab, 0xf0, 0x92, 0x86, 0x6d, 0xf7, 0x1c, 0x0f, 0x1e, 0xc9, - 0x35, 0x98, 0x66, 0x66, 0x12, 0x35, 0x63, 0x3f, 0xe4, 0x43, 0xd8, 0x18, 0x34, 0xe0, 0x9c, 0x4e, - 0x92, 0xa3, 0xf2, 0x3f, 0xf2, 0xbd, 0x7a, 0x70, 0xea, 0xeb, 0x06, 0xf6, 0x5c, 0xcb, 0x09, 0xc5, - 0xe2, 0xe6, 0x40, 0x3d, 0x18, 0xef, 0x60, 0x0e, 0x78, 0x52, 0x06, 0x24, 0xbd, 0x0a, 0x51, 0xfa, - 0xbb, 0x04, 0x2b, 0x83, 0x6b, 0xff, 0x00, 0xd6, 0xc3, 0x48, 0x58, 0x37, 0xa3, 0xd2, 0xc2, 0x77, - 0xb5, 0x9f, 0xf6, 0xcd, 0x48, 0xf6, 0xc9, 0xa4, 0x38, 0x89, 0x66, 0xb7, 0xe7, 0x3f, 0x56, 0xa4, - 0xd6, 0xc2, 0x14, 0x69, 0x94, 0x07, 0xf9, 0x58, 0x1e, 0xbc, 0x09, 0x85, 0x30, 0x6c, 0xf3, 0x4f, - 0xf5, 0xf0, 0x2e, 0x84, 0x9d, 0xbd, 0xd3, 0xd3, 0x87, 0x1a, 0x51, 0x97, 0xff, 0x18, 0x2b, 0x7d, - 0xf1, 0x0d, 0x72, 0xa2, 0xbe, 0x07, 0xf3, 0xbd, 0x00, 0xae, 0x0e, 0x30, 0x3f, 0x7c, 0x3d, 0x91, - 0xa8, 0x7c, 0x31, 0xe7, 0x13, 0x57, 0xbe, 0x7e, 0x6f, 0x5f, 0x78, 0xe5, 0xeb, 0x05, 0x70, 0xd5, - 0xca, 0xf7, 0x1b, 0x56, 0xf9, 0xfa, 0x1d, 0x71, 0xf0, 0x1f, 0xa7, 0x81, 0x9f, 0x59, 0xf8, 0xd2, - 0x9d, 0xf5, 0x61, 0x8f, 0x13, 0xd0, 0xd3, 0xc2, 0xd7, 0xa7, 0xf0, 0x65, 
0xe1, 0xeb, 0x2f, 0x7c, - 0x83, 0xe7, 0x3f, 0xa3, 0xf0, 0xa5, 0x9c, 0xa7, 0x89, 0x0a, 0x9f, 0x01, 0xab, 0xac, 0x1f, 0x3c, - 0x70, 0x9d, 0xa6, 0xd5, 0x12, 0xab, 0x66, 0xb4, 0x51, 0x73, 0xbc, 0x8d, 0x22, 0x3d, 0x24, 0xeb, - 0x3b, 0x3d, 0xd7, 0xac, 0xc7, 0x52, 0x98, 0x75, 0xa7, 0x27, 0xae, 0x49, 0xf6, 0x27, 0x1f, 0xc0, - 0x5a, 0x72, 0x91, 0xab, 0x44, 0xba, 0x0a, 0x2b, 0x87, 0x38, 0xac, 0x19, 0xbe, 0xe5, 0x85, 0xe2, - 0x9a, 0x24, 0xff, 0x45, 0x02, 0x14, 0x97, 0x72, 0xc7, 0x67, 0x30, 0x1b, 0x30, 0x11, 0xcf, 0xe8, - 0x77, 0xb2, 0x32, 0xba, 0xcf, 0x83, 0xc2, 0x7f, 0xbf, 0xef, 0x84, 0xfe, 0xa5, 0x26, 0x9c, 0x95, - 0x6a, 0xb0, 0x10, 0x9f, 0x48, 0x81, 0xe9, 0x76, 0x1c, 0xa6, 0xf9, 0x9d, 0x97, 0x68, 0x79, 0xe6, - 0x57, 0x74, 0xe5, 0xc0, 0x77, 0x1d, 0x66, 0xcf, 0xf1, 0xbb, 0x97, 0x7f, 0x4b, 0x92, 0x8f, 0x61, - 0xe3, 0xbe, 0x69, 0x7e, 0xe4, 0x33, 0x88, 0xf8, 0x3c, 0xe7, 0x41, 0x25, 0x97, 0x74, 0x22, 0xe0, - 0x08, 0x0d, 0xf5, 0xc7, 0xd5, 0xe4, 0x97, 0x61, 0x33, 0xc5, 0x19, 0xbf, 0xd0, 0x7d, 0x1b, 0x56, - 0x2b, 0xf4, 0xda, 0x95, 0x5c, 0xe4, 0x1e, 0xcc, 0x31, 0xeb, 0x11, 0x17, 0xbb, 0x85, 0xee, 0xb3, - 0xad, 0x22, 0x33, 0xab, 0x56, 0xb4, 0x22, 0xd3, 0xaf, 0x9a, 0xf2, 0x75, 0x58, 0x4b, 0xba, 0xe4, - 0x4b, 0xfd, 0x59, 0x82, 0x95, 0x5a, 0x3f, 0x5d, 0xe8, 0x51, 0x3f, 0x2f, 0x6f, 0x8f, 0xe4, 0x65, - 0xc0, 0xc1, 0x17, 0x49, 0xcb, 0x1a, 0xa0, 0xda, 0x40, 0x5e, 0xc8, 0x7f, 0x95, 0x60, 0xf1, 0xfd, - 0x0b, 0x6c, 0x74, 0xc8, 0x77, 0x8e, 0x64, 0x68, 0x80, 0x6e, 0xc1, 0x0a, 0x16, 0x92, 0x7a, 0x68, - 0xd9, 0xb8, 0xee, 0xb0, 0x84, 0x2e, 0x68, 0x4b, 0xd1, 0xc4, 0xa9, 0x65, 0xe3, 0x0f, 0x03, 0xa4, - 0xc0, 0xaa, 0xe1, 0xda, 0x9e, 0xd5, 0xd6, 0x13, 0xda, 0x79, 0xaa, 0xbd, 0x12, 0x9b, 0xe2, 0xfa, - 0xaf, 0xc1, 0x52, 0xe3, 0x92, 0xde, 0xda, 0x7d, 0xd7, 0xc0, 0x41, 0xc0, 0x6f, 0x74, 0x05, 0x6d, - 0x91, 0x8a, 0x4f, 0x84, 0x14, 0x6d, 0xc3, 0x8a, 0x8f, 0x0d, 0xd7, 0x37, 0xe3, 0xaa, 0x53, 0x54, - 0x75, 0x99, 0x4f, 0x44, 0xca, 0xf2, 0xef, 0xf2, 0xf0, 0x15, 0x8d, 0x0a, 0xa3, 0xad, 0x68, 0x38, - 0xe8, 0xb4, 
0xff, 0x1f, 0x19, 0x81, 0xde, 0x82, 0xb9, 0xe8, 0x99, 0x90, 0xc3, 0x5d, 0x1a, 0xe8, - 0x14, 0x4e, 0x85, 0x86, 0xd6, 0x53, 0x46, 0xdb, 0x30, 0x8d, 0x7d, 0xdf, 0xf5, 0x79, 0x7f, 0x91, - 0x56, 0x0d, 0xc8, 0xbd, 0x9f, 0xea, 0xa0, 0x33, 0xe8, 0x81, 0x4b, 0x4b, 0x73, 0xc0, 0x6f, 0xff, - 0xdb, 0x23, 0x53, 0x2a, 0xc9, 0xdd, 0x51, 0x4e, 0x5b, 0xc4, 0x09, 0xc9, 0x7e, 0x11, 0x66, 0x7c, - 0x8a, 0x85, 0xbc, 0x05, 0xaf, 0x0c, 0x01, 0x89, 0xe7, 0xc2, 0x16, 0xbc, 0x72, 0x88, 0xc3, 0xfb, - 0xed, 0x76, 0x9f, 0x42, 0x54, 0x9d, 0x7e, 0x5d, 0x80, 0xf2, 0x30, 0x0d, 0x5e, 0xa9, 0x30, 0xcc, - 0xb2, 0xe5, 0xc4, 0x89, 0x38, 0xce, 0xaa, 0x54, 0x23, 0xbc, 0x29, 0xfd, 0x91, 0x0a, 0xdf, 0xa5, - 0x5f, 0xe5, 0x61, 0xa9, 0x6f, 0xf2, 0x4b, 0x92, 0x3b, 0xed, 0x70, 0xe7, 0x9f, 0x05, 0x58, 0x12, - 0xcf, 0x8b, 0x35, 0xe6, 0x08, 0x5d, 0xc0, 0x12, 0xc1, 0x39, 0xfe, 0x62, 0xf3, 0xc6, 0xb8, 0x2f, - 0x3d, 0x82, 0xfb, 0xd2, 0x9d, 0x09, 0x2c, 0x18, 0x7b, 0x6f, 0x48, 0x08, 0x03, 0xd0, 0x6f, 0x11, - 0x7b, 0xd4, 0x19, 0xfd, 0x62, 0x9a, 0x78, 0xdf, 0x2d, 0x6d, 0x8f, 0xa5, 0xcb, 0x93, 0xce, 0x86, - 0x05, 0xb1, 0x41, 0xd2, 0xbf, 0xa1, 0xdb, 0xd9, 0xb1, 0xc6, 0xba, 0xcf, 0x92, 0x32, 0xae, 0x3a, - 0x5f, 0xee, 0x12, 0x96, 0x0f, 0x71, 0x98, 0x78, 0xbd, 0x41, 0x77, 0x26, 0x79, 0xe9, 0x61, 0xcb, - 0xee, 0x4c, 0xfe, 0x38, 0xb4, 0xf3, 0x87, 0x02, 0x6c, 0x0a, 0x7a, 0x63, 0xb7, 0x6e, 0x4e, 0xf4, - 0xcf, 0x24, 0x40, 0x83, 0x8f, 0x28, 0xe8, 0xee, 0xc4, 0xaf, 0x2e, 0x2c, 0xc0, 0xbd, 0x2b, 0xbe, - 0xd6, 0xa0, 0x9f, 0x4a, 0xb4, 0xb7, 0x49, 0x3e, 0x27, 0xa0, 0xdd, 0x49, 0x9f, 0x1f, 0x58, 0x14, - 0x77, 0xaf, 0xf6, 0x6a, 0x81, 0x7e, 0x0c, 0xcb, 0xfd, 0x77, 0x69, 0xf4, 0x66, 0xc6, 0x8e, 0x52, - 0xef, 0xea, 0xa5, 0xdd, 0x09, 0xad, 0x52, 0xb8, 0x8a, 0x5d, 0x14, 0x52, 0xb8, 0xea, 0xcd, 0x8e, - 0xc9, 0xd5, 0x40, 0x5b, 0x3d, 0x26, 0x57, 0x29, 0xdd, 0x35, 0xe7, 0x2a, 0x79, 0x03, 0xca, 0xe6, - 0x2a, 0xf5, 0x1e, 0x97, 0xcd, 0xd5, 0x90, 0x5b, 0x5b, 0xc4, 0x55, 0x0c, 0x89, 0x71, 0xb8, 0x1a, - 0xc4, 0x61, 0x77, 0x42, 0x2b, 0xce, 0xd5, 0xcf, 
0x25, 0x58, 0x17, 0x5c, 0xb1, 0xa6, 0x5e, 0xf0, - 0x14, 0xc0, 0x42, 0xbc, 0xd7, 0xcf, 0xa8, 0x9c, 0x29, 0x77, 0x8f, 0x8c, 0xca, 0x99, 0x76, 0x91, - 0xd8, 0xf9, 0xe5, 0x0c, 0x5c, 0xef, 0x75, 0x71, 0xb5, 0xd0, 0xf5, 0xa3, 0x33, 0x6e, 0xf3, 0x92, - 0x4a, 0xdb, 0x38, 0xa4, 0x8c, 0x7d, 0x0f, 0x60, 0xb1, 0xa8, 0x13, 0xde, 0x1b, 0x68, 0x7a, 0x0c, - 0x34, 0xe0, 0x19, 0xe9, 0x31, 0xac, 0xfb, 0xcf, 0x48, 0x8f, 0xa1, 0x7d, 0x3e, 0xe1, 0x20, 0xde, - 0x94, 0x67, 0x70, 0x90, 0x72, 0x25, 0xc8, 0xe0, 0x20, 0xad, 0xe3, 0x27, 0x40, 0xd7, 0xc6, 0x05, - 0xba, 0x36, 0x21, 0xd0, 0x83, 0x8d, 0x38, 0xfa, 0x58, 0x82, 0xf5, 0xd4, 0xf6, 0x0c, 0x7d, 0x33, - 0x23, 0xa5, 0x87, 0xf7, 0xbd, 0xa5, 0x7b, 0x57, 0x31, 0xe5, 0x01, 0x7d, 0x22, 0xc1, 0xf5, 0xf4, - 0xf6, 0x0c, 0xdd, 0xbb, 0x52, 0x4f, 0xc7, 0x42, 0x7a, 0xfb, 0x7f, 0xe8, 0x07, 0xf7, 0xdf, 0x7b, - 0xfa, 0xbc, 0x9c, 0xfb, 0xec, 0x79, 0x39, 0xf7, 0xf9, 0xf3, 0xb2, 0xf4, 0x93, 0x6e, 0x59, 0xfa, - 0x7d, 0xb7, 0x2c, 0xfd, 0xad, 0x5b, 0x96, 0x9e, 0x76, 0xcb, 0xd2, 0xbf, 0xba, 0x65, 0xe9, 0xdf, - 0xdd, 0x72, 0xee, 0xf3, 0x6e, 0x59, 0xfa, 0xc5, 0x8b, 0x72, 0xee, 0xe9, 0x8b, 0x72, 0xee, 0xb3, - 0x17, 0xe5, 0xdc, 0x77, 0xa1, 0xf7, 0x3f, 0x06, 0x8d, 0x19, 0xda, 0xcd, 0x7d, 0xe3, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xa6, 0x68, 0x46, 0x22, 0x95, 0x20, 0x00, 0x00, -} - -func (this *SchemaRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*SchemaRequest) - if !ok { - that2, ok := that.(SchemaRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *SchemaResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*SchemaResponse) - if !ok { - that2, ok := that.(SchemaResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if 
!this.Schema.Equal(that1.Schema) { - return false - } - return true -} -func (this *AgentInfoRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AgentInfoRequest) - if !ok { - that2, ok := that.(AgentInfoRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *AgentInfoResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AgentInfoResponse) - if !ok { - that2, ok := that.(AgentInfoResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Info) != len(that1.Info) { - return false - } - for i := range this.Info { - if !this.Info[i].Equal(that1.Info[i]) { - return false - } - } - return true -} -func (this *AgentMetadata) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AgentMetadata) - if !ok { - that2, ok := that.(AgentMetadata) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Agent.Equal(that1.Agent) { - return false - } - if !this.Status.Equal(that1.Status) { - return false - } - if !this.CarnotInfo.Equal(that1.CarnotInfo) { - return false - } - return true -} -func (this *AgentUpdatesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AgentUpdatesRequest) - if !ok { - that2, ok := that.(AgentUpdatesRequest) - if ok { - that1 = &that2 - } else { - return false - } + that1, ok := that.(*AgentUpdatesRequest) + if !ok { + that2, ok := that.(AgentUpdatesRequest) + if ok { + that1 = &that2 + } else { + return false + } } if that1 == nil { return this == nil @@ -2888,14 +2456,14 @@ func (this 
*WithPrefixKeyResponse_KV) Equal(that interface{}) bool { } return true } -func (this *RegisterFileSourceRequest) Equal(that interface{}) bool { +func (this *RegisterTracepointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterFileSourceRequest) + that1, ok := that.(*RegisterTracepointRequest) if !ok { - that2, ok := that.(RegisterFileSourceRequest) + that2, ok := that.(RegisterTracepointRequest) if ok { that1 = &that2 } else { @@ -2917,14 +2485,44 @@ func (this *RegisterFileSourceRequest) Equal(that interface{}) bool { } return true } -func (this *RegisterFileSourceResponse) Equal(that interface{}) bool { +func (this *RegisterTracepointRequest_TracepointRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RegisterTracepointRequest_TracepointRequest) + if !ok { + that2, ok := that.(RegisterTracepointRequest_TracepointRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TracepointDeployment.Equal(that1.TracepointDeployment) { + return false + } + if this.Name != that1.Name { + return false + } + if !this.TTL.Equal(that1.TTL) { + return false + } + return true +} +func (this *RegisterTracepointResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterFileSourceResponse) + that1, ok := that.(*RegisterTracepointResponse) if !ok { - that2, ok := that.(RegisterFileSourceResponse) + that2, ok := that.(RegisterTracepointResponse) if ok { that1 = &that2 } else { @@ -2936,11 +2534,11 @@ func (this *RegisterFileSourceResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.FileSources) != len(that1.FileSources) { + if len(this.Tracepoints) != len(that1.Tracepoints) { return false } - for i := range this.FileSources { - if 
!this.FileSources[i].Equal(that1.FileSources[i]) { + for i := range this.Tracepoints { + if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { return false } } @@ -2949,14 +2547,14 @@ func (this *RegisterFileSourceResponse) Equal(that interface{}) bool { } return true } -func (this *RegisterFileSourceResponse_FileSourceStatus) Equal(that interface{}) bool { +func (this *RegisterTracepointResponse_TracepointStatus) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterFileSourceResponse_FileSourceStatus) + that1, ok := that.(*RegisterTracepointResponse_TracepointStatus) if !ok { - that2, ok := that.(RegisterFileSourceResponse_FileSourceStatus) + that2, ok := that.(RegisterTracepointResponse_TracepointStatus) if ok { that1 = &that2 } else { @@ -2979,14 +2577,14 @@ func (this *RegisterFileSourceResponse_FileSourceStatus) Equal(that interface{}) } return true } -func (this *GetFileSourceInfoRequest) Equal(that interface{}) bool { +func (this *GetTracepointInfoRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetFileSourceInfoRequest) + that1, ok := that.(*GetTracepointInfoRequest) if !ok { - that2, ok := that.(GetFileSourceInfoRequest) + that2, ok := that.(GetTracepointInfoRequest) if ok { that1 = &that2 } else { @@ -3008,14 +2606,14 @@ func (this *GetFileSourceInfoRequest) Equal(that interface{}) bool { } return true } -func (this *GetFileSourceInfoResponse) Equal(that interface{}) bool { +func (this *GetTracepointInfoResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetFileSourceInfoResponse) + that1, ok := that.(*GetTracepointInfoResponse) if !ok { - that2, ok := that.(GetFileSourceInfoResponse) + that2, ok := that.(GetTracepointInfoResponse) if ok { that1 = &that2 } else { @@ -3027,24 +2625,24 @@ func (this *GetFileSourceInfoResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if 
len(this.FileSources) != len(that1.FileSources) { + if len(this.Tracepoints) != len(that1.Tracepoints) { return false } - for i := range this.FileSources { - if !this.FileSources[i].Equal(that1.FileSources[i]) { + for i := range this.Tracepoints { + if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { return false } } return true } -func (this *GetFileSourceInfoResponse_FileSourceState) Equal(that interface{}) bool { +func (this *GetTracepointInfoResponse_TracepointState) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetFileSourceInfoResponse_FileSourceState) + that1, ok := that.(*GetTracepointInfoResponse_TracepointState) if !ok { - that2, ok := that.(GetFileSourceInfoResponse_FileSourceState) + that2, ok := that.(GetTracepointInfoResponse_TracepointState) if ok { that1 = &that2 } else { @@ -3086,14 +2684,14 @@ func (this *GetFileSourceInfoResponse_FileSourceState) Equal(that interface{}) b } return true } -func (this *RemoveFileSourceRequest) Equal(that interface{}) bool { +func (this *RemoveTracepointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RemoveFileSourceRequest) + that1, ok := that.(*RemoveTracepointRequest) if !ok { - that2, ok := that.(RemoveFileSourceRequest) + that2, ok := that.(RemoveTracepointRequest) if ok { that1 = &that2 } else { @@ -3115,14 +2713,14 @@ func (this *RemoveFileSourceRequest) Equal(that interface{}) bool { } return true } -func (this *RemoveFileSourceResponse) Equal(that interface{}) bool { +func (this *RemoveTracepointResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RemoveFileSourceResponse) + that1, ok := that.(*RemoveTracepointResponse) if !ok { - that2, ok := that.(RemoveFileSourceResponse) + that2, ok := that.(RemoveTracepointResponse) if ok { that1 = &that2 } else { @@ -3139,14 +2737,14 @@ func (this *RemoveFileSourceResponse) Equal(that interface{}) bool { } return true } 
-func (this *RegisterTracepointRequest) Equal(that interface{}) bool { +func (this *UpdateConfigRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointRequest) + that1, ok := that.(*UpdateConfigRequest) if !ok { - that2, ok := that.(RegisterTracepointRequest) + that2, ok := that.(UpdateConfigRequest) if ok { that1 = &that2 } else { @@ -3158,24 +2756,25 @@ func (this *RegisterTracepointRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Requests) != len(that1.Requests) { + if this.Key != that1.Key { return false } - for i := range this.Requests { - if !this.Requests[i].Equal(that1.Requests[i]) { - return false - } + if this.Value != that1.Value { + return false + } + if this.AgentPodName != that1.AgentPodName { + return false } return true } -func (this *RegisterTracepointRequest_TracepointRequest) Equal(that interface{}) bool { +func (this *UpdateConfigResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointRequest_TracepointRequest) + that1, ok := that.(*UpdateConfigResponse) if !ok { - that2, ok := that.(RegisterTracepointRequest_TracepointRequest) + that2, ok := that.(UpdateConfigResponse) if ok { that1 = &that2 } else { @@ -3187,25 +2786,19 @@ func (this *RegisterTracepointRequest_TracepointRequest) Equal(that interface{}) } else if this == nil { return false } - if !this.TracepointDeployment.Equal(that1.TracepointDeployment) { - return false - } - if this.Name != that1.Name { - return false - } - if !this.TTL.Equal(that1.TTL) { + if !this.Status.Equal(that1.Status) { return false } return true } -func (this *RegisterTracepointResponse) Equal(that interface{}) bool { +func (this *GetScriptsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointResponse) + that1, ok := that.(*GetScriptsRequest) if !ok { - that2, ok := 
that.(RegisterTracepointResponse) + that2, ok := that.(GetScriptsRequest) if ok { that1 = &that2 } else { @@ -3217,27 +2810,16 @@ func (this *RegisterTracepointResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Tracepoints) != len(that1.Tracepoints) { - return false - } - for i := range this.Tracepoints { - if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { - return false - } - } - if !this.Status.Equal(that1.Status) { - return false - } return true } -func (this *RegisterTracepointResponse_TracepointStatus) Equal(that interface{}) bool { +func (this *GetScriptsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RegisterTracepointResponse_TracepointStatus) + that1, ok := that.(*GetScriptsResponse) if !ok { - that2, ok := that.(RegisterTracepointResponse_TracepointStatus) + that2, ok := that.(GetScriptsResponse) if ok { that1 = &that2 } else { @@ -3249,25 +2831,24 @@ func (this *RegisterTracepointResponse_TracepointStatus) Equal(that interface{}) } else if this == nil { return false } - if !this.Status.Equal(that1.Status) { - return false - } - if !this.ID.Equal(that1.ID) { + if len(this.Scripts) != len(that1.Scripts) { return false } - if this.Name != that1.Name { - return false + for i := range this.Scripts { + if !this.Scripts[i].Equal(that1.Scripts[i]) { + return false + } } return true } -func (this *GetTracepointInfoRequest) Equal(that interface{}) bool { +func (this *AddOrUpdateScriptRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetTracepointInfoRequest) + that1, ok := that.(*AddOrUpdateScriptRequest) if !ok { - that2, ok := that.(GetTracepointInfoRequest) + that2, ok := that.(AddOrUpdateScriptRequest) if ok { that1 = &that2 } else { @@ -3279,24 +2860,19 @@ func (this *GetTracepointInfoRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.IDs) != len(that1.IDs) { + if 
!this.Script.Equal(that1.Script) { return false } - for i := range this.IDs { - if !this.IDs[i].Equal(that1.IDs[i]) { - return false - } - } return true } -func (this *GetTracepointInfoResponse) Equal(that interface{}) bool { +func (this *AddOrUpdateScriptResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetTracepointInfoResponse) + that1, ok := that.(*AddOrUpdateScriptResponse) if !ok { - that2, ok := that.(GetTracepointInfoResponse) + that2, ok := that.(AddOrUpdateScriptResponse) if ok { that1 = &that2 } else { @@ -3308,24 +2884,16 @@ func (this *GetTracepointInfoResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Tracepoints) != len(that1.Tracepoints) { - return false - } - for i := range this.Tracepoints { - if !this.Tracepoints[i].Equal(that1.Tracepoints[i]) { - return false - } - } return true } -func (this *GetTracepointInfoResponse_TracepointState) Equal(that interface{}) bool { +func (this *DeleteScriptRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetTracepointInfoResponse_TracepointState) + that1, ok := that.(*DeleteScriptRequest) if !ok { - that2, ok := that.(GetTracepointInfoResponse_TracepointState) + that2, ok := that.(DeleteScriptRequest) if ok { that1 = &that2 } else { @@ -3337,44 +2905,40 @@ func (this *GetTracepointInfoResponse_TracepointState) Equal(that interface{}) b } else if this == nil { return false } - if !this.ID.Equal(that1.ID) { - return false - } - if this.State != that1.State { + if !this.ScriptID.Equal(that1.ScriptID) { return false } - if len(this.Statuses) != len(that1.Statuses) { - return false + return true +} +func (this *DeleteScriptResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil } - for i := range this.Statuses { - if !this.Statuses[i].Equal(that1.Statuses[i]) { + + that1, ok := that.(*DeleteScriptResponse) + if !ok { + that2, ok := 
that.(DeleteScriptResponse) + if ok { + that1 = &that2 + } else { return false } } - if this.Name != that1.Name { - return false - } - if this.ExpectedState != that1.ExpectedState { - return false - } - if len(this.SchemaNames) != len(that1.SchemaNames) { + if that1 == nil { + return this == nil + } else if this == nil { return false } - for i := range this.SchemaNames { - if this.SchemaNames[i] != that1.SchemaNames[i] { - return false - } - } return true } -func (this *RemoveTracepointRequest) Equal(that interface{}) bool { +func (this *SetScriptsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RemoveTracepointRequest) + that1, ok := that.(*SetScriptsRequest) if !ok { - that2, ok := that.(RemoveTracepointRequest) + that2, ok := that.(SetScriptsRequest) if ok { that1 = &that2 } else { @@ -3386,24 +2950,24 @@ func (this *RemoveTracepointRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Names) != len(that1.Names) { + if len(this.Scripts) != len(that1.Scripts) { return false } - for i := range this.Names { - if this.Names[i] != that1.Names[i] { + for i := range this.Scripts { + if !this.Scripts[i].Equal(that1.Scripts[i]) { return false } } return true } -func (this *RemoveTracepointResponse) Equal(that interface{}) bool { +func (this *SetScriptsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*RemoveTracepointResponse) + that1, ok := that.(*SetScriptsResponse) if !ok { - that2, ok := that.(RemoveTracepointResponse) + that2, ok := that.(SetScriptsResponse) if ok { that1 = &that2 } else { @@ -3415,19 +2979,16 @@ func (this *RemoveTracepointResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Status.Equal(that1.Status) { - return false - } return true } -func (this *UpdateConfigRequest) Equal(that interface{}) bool { +func (this *ExecutionStats) Equal(that interface{}) bool { if that == nil 
{ return this == nil } - that1, ok := that.(*UpdateConfigRequest) + that1, ok := that.(*ExecutionStats) if !ok { - that2, ok := that.(UpdateConfigRequest) + that2, ok := that.(ExecutionStats) if ok { that1 = &that2 } else { @@ -3439,25 +3000,28 @@ func (this *UpdateConfigRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if this.Key != that1.Key { + if this.ExecutionTimeNs != that1.ExecutionTimeNs { return false } - if this.Value != that1.Value { + if this.CompilationTimeNs != that1.CompilationTimeNs { return false } - if this.AgentPodName != that1.AgentPodName { + if this.BytesProcessed != that1.BytesProcessed { + return false + } + if this.RecordsProcessed != that1.RecordsProcessed { return false } return true } -func (this *UpdateConfigResponse) Equal(that interface{}) bool { +func (this *RecordExecutionResultRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*UpdateConfigResponse) + that1, ok := that.(*RecordExecutionResultRequest) if !ok { - that2, ok := that.(UpdateConfigResponse) + that2, ok := that.(RecordExecutionResultRequest) if ok { that1 = &that2 } else { @@ -3469,40 +3033,31 @@ func (this *UpdateConfigResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Status.Equal(that1.Status) { + if !this.ScriptID.Equal(that1.ScriptID) { return false } - return true -} -func (this *GetScriptsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil + if !this.Timestamp.Equal(that1.Timestamp) { + return false } - - that1, ok := that.(*GetScriptsRequest) - if !ok { - that2, ok := that.(GetScriptsRequest) - if ok { - that1 = &that2 - } else { + if that1.Result == nil { + if this.Result != nil { return false } - } - if that1 == nil { - return this == nil - } else if this == nil { + } else if this.Result == nil { + return false + } else if !this.Result.Equal(that1.Result) { return false } return true } -func (this 
*GetScriptsResponse) Equal(that interface{}) bool { +func (this *RecordExecutionResultRequest_Error) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*GetScriptsResponse) + that1, ok := that.(*RecordExecutionResultRequest_Error) if !ok { - that2, ok := that.(GetScriptsResponse) + that2, ok := that.(RecordExecutionResultRequest_Error) if ok { that1 = &that2 } else { @@ -3514,24 +3069,19 @@ func (this *GetScriptsResponse) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Scripts) != len(that1.Scripts) { + if !this.Error.Equal(that1.Error) { return false } - for i := range this.Scripts { - if !this.Scripts[i].Equal(that1.Scripts[i]) { - return false - } - } return true } -func (this *AddOrUpdateScriptRequest) Equal(that interface{}) bool { +func (this *RecordExecutionResultRequest_ExecutionStats) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*AddOrUpdateScriptRequest) + that1, ok := that.(*RecordExecutionResultRequest_ExecutionStats) if !ok { - that2, ok := that.(AddOrUpdateScriptRequest) + that2, ok := that.(RecordExecutionResultRequest_ExecutionStats) if ok { that1 = &that2 } else { @@ -3543,19 +3093,19 @@ func (this *AddOrUpdateScriptRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.Script.Equal(that1.Script) { + if !this.ExecutionStats.Equal(that1.ExecutionStats) { return false } return true } -func (this *AddOrUpdateScriptResponse) Equal(that interface{}) bool { +func (this *RecordExecutionResultResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*AddOrUpdateScriptResponse) + that1, ok := that.(*RecordExecutionResultResponse) if !ok { - that2, ok := that.(AddOrUpdateScriptResponse) + that2, ok := that.(RecordExecutionResultResponse) if ok { that1 = &that2 } else { @@ -3569,14 +3119,14 @@ func (this *AddOrUpdateScriptResponse) Equal(that interface{}) bool { } 
return true } -func (this *DeleteScriptRequest) Equal(that interface{}) bool { +func (this *GetAllExecutionResultsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*DeleteScriptRequest) + that1, ok := that.(*GetAllExecutionResultsRequest) if !ok { - that2, ok := that.(DeleteScriptRequest) + that2, ok := that.(GetAllExecutionResultsRequest) if ok { that1 = &that2 } else { @@ -3588,19 +3138,16 @@ func (this *DeleteScriptRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.ScriptID.Equal(that1.ScriptID) { - return false - } return true } -func (this *DeleteScriptResponse) Equal(that interface{}) bool { +func (this *GetAllExecutionResultsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*DeleteScriptResponse) + that1, ok := that.(*GetAllExecutionResultsResponse) if !ok { - that2, ok := that.(DeleteScriptResponse) + that2, ok := that.(GetAllExecutionResultsResponse) if ok { that1 = &that2 } else { @@ -3612,16 +3159,24 @@ func (this *DeleteScriptResponse) Equal(that interface{}) bool { } else if this == nil { return false } + if len(this.Results) != len(that1.Results) { + return false + } + for i := range this.Results { + if !this.Results[i].Equal(that1.Results[i]) { + return false + } + } return true } -func (this *SetScriptsRequest) Equal(that interface{}) bool { +func (this *GetAllExecutionResultsResponse_ExecutionResult) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SetScriptsRequest) + that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult) if !ok { - that2, ok := that.(SetScriptsRequest) + that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult) if ok { that1 = &that2 } else { @@ -3633,245 +3188,7 @@ func (this *SetScriptsRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Scripts) != len(that1.Scripts) { - return false - } - 
for i := range this.Scripts { - if !this.Scripts[i].Equal(that1.Scripts[i]) { - return false - } - } - return true -} -func (this *SetScriptsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*SetScriptsResponse) - if !ok { - that2, ok := that.(SetScriptsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *ExecutionStats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ExecutionStats) - if !ok { - that2, ok := that.(ExecutionStats) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ExecutionTimeNs != that1.ExecutionTimeNs { - return false - } - if this.CompilationTimeNs != that1.CompilationTimeNs { - return false - } - if this.BytesProcessed != that1.BytesProcessed { - return false - } - if this.RecordsProcessed != that1.RecordsProcessed { - return false - } - return true -} -func (this *RecordExecutionResultRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RecordExecutionResultRequest) - if !ok { - that2, ok := that.(RecordExecutionResultRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ScriptID.Equal(that1.ScriptID) { - return false - } - if !this.Timestamp.Equal(that1.Timestamp) { - return false - } - if that1.Result == nil { - if this.Result != nil { - return false - } - } else if this.Result == nil { - return false - } else if !this.Result.Equal(that1.Result) { - return false - } - return true -} -func (this *RecordExecutionResultRequest_Error) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := 
that.(*RecordExecutionResultRequest_Error) - if !ok { - that2, ok := that.(RecordExecutionResultRequest_Error) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Error.Equal(that1.Error) { - return false - } - return true -} -func (this *RecordExecutionResultRequest_ExecutionStats) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RecordExecutionResultRequest_ExecutionStats) - if !ok { - that2, ok := that.(RecordExecutionResultRequest_ExecutionStats) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ExecutionStats.Equal(that1.ExecutionStats) { - return false - } - return true -} -func (this *RecordExecutionResultResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RecordExecutionResultResponse) - if !ok { - that2, ok := that.(RecordExecutionResultResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *GetAllExecutionResultsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetAllExecutionResultsRequest) - if !ok { - that2, ok := that.(GetAllExecutionResultsRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *GetAllExecutionResultsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetAllExecutionResultsResponse) - if !ok { - that2, ok := that.(GetAllExecutionResultsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } 
else if this == nil { - return false - } - if len(this.Results) != len(that1.Results) { - return false - } - for i := range this.Results { - if !this.Results[i].Equal(that1.Results[i]) { - return false - } - } - return true -} -func (this *GetAllExecutionResultsResponse_ExecutionResult) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetAllExecutionResultsResponse_ExecutionResult) - if !ok { - that2, ok := that.(GetAllExecutionResultsResponse_ExecutionResult) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ScriptID.Equal(that1.ScriptID) { + if !this.ScriptID.Equal(that1.ScriptID) { return false } if !this.Timestamp.Equal(that1.Timestamp) { @@ -4099,151 +3416,43 @@ func (this *WithPrefixKeyResponse_KV) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *RegisterFileSourceRequest) GoString() string { +func (this *RegisterTracepointRequest) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) - s = append(s, "&metadatapb.RegisterFileSourceRequest{") + s = append(s, "&metadatapb.RegisterTracepointRequest{") if this.Requests != nil { s = append(s, "Requests: "+fmt.Sprintf("%#v", this.Requests)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *RegisterFileSourceResponse) GoString() string { +func (this *RegisterTracepointRequest_TracepointRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) - s = append(s, "&metadatapb.RegisterFileSourceResponse{") - if this.FileSources != nil { - s = append(s, "FileSources: "+fmt.Sprintf("%#v", this.FileSources)+",\n") + s := make([]string, 0, 7) + s = append(s, "&metadatapb.RegisterTracepointRequest_TracepointRequest{") + if this.TracepointDeployment != nil { + s = append(s, "TracepointDeployment: "+fmt.Sprintf("%#v", this.TracepointDeployment)+",\n") } - if this.Status 
!= nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.TTL != nil { + s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") } s = append(s, "}") return strings.Join(s, "") } -func (this *RegisterFileSourceResponse_FileSourceStatus) GoString() string { +func (this *RegisterTracepointResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) - s = append(s, "&metadatapb.RegisterFileSourceResponse_FileSourceStatus{") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - } - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetFileSourceInfoRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.GetFileSourceInfoRequest{") - if this.IDs != nil { - s = append(s, "IDs: "+fmt.Sprintf("%#v", this.IDs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetFileSourceInfoResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.GetFileSourceInfoResponse{") - if this.FileSources != nil { - s = append(s, "FileSources: "+fmt.Sprintf("%#v", this.FileSources)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetFileSourceInfoResponse_FileSourceState) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&metadatapb.GetFileSourceInfoResponse_FileSourceState{") - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - if this.Statuses != nil { - s = append(s, "Statuses: "+fmt.Sprintf("%#v", this.Statuses)+",\n") - } - s = append(s, "Name: 
"+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "ExpectedState: "+fmt.Sprintf("%#v", this.ExpectedState)+",\n") - s = append(s, "SchemaNames: "+fmt.Sprintf("%#v", this.SchemaNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RemoveFileSourceRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.RemoveFileSourceRequest{") - s = append(s, "Names: "+fmt.Sprintf("%#v", this.Names)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RemoveFileSourceResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.RemoveFileSourceResponse{") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RegisterTracepointRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&metadatapb.RegisterTracepointRequest{") - if this.Requests != nil { - s = append(s, "Requests: "+fmt.Sprintf("%#v", this.Requests)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RegisterTracepointRequest_TracepointRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&metadatapb.RegisterTracepointRequest_TracepointRequest{") - if this.TracepointDeployment != nil { - s = append(s, "TracepointDeployment: "+fmt.Sprintf("%#v", this.TracepointDeployment)+",\n") - } - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - if this.TTL != nil { - s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RegisterTracepointResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&metadatapb.RegisterTracepointResponse{") - if this.Tracepoints != nil { - s = 
append(s, "Tracepoints: "+fmt.Sprintf("%#v", this.Tracepoints)+",\n") - } + s := make([]string, 0, 6) + s = append(s, "&metadatapb.RegisterTracepointResponse{") + if this.Tracepoints != nil { + s = append(s, "Tracepoints: "+fmt.Sprintf("%#v", this.Tracepoints)+",\n") + } if this.Status != nil { s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") } @@ -4795,150 +4004,6 @@ var _MetadataService_serviceDesc = grpc.ServiceDesc{ Metadata: "src/vizier/services/metadata/metadatapb/service.proto", } -// MetadataFileSourceServiceClient is the client API for MetadataFileSourceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetadataFileSourceServiceClient interface { - RegisterFileSource(ctx context.Context, in *RegisterFileSourceRequest, opts ...grpc.CallOption) (*RegisterFileSourceResponse, error) - GetFileSourceInfo(ctx context.Context, in *GetFileSourceInfoRequest, opts ...grpc.CallOption) (*GetFileSourceInfoResponse, error) - RemoveFileSource(ctx context.Context, in *RemoveFileSourceRequest, opts ...grpc.CallOption) (*RemoveFileSourceResponse, error) -} - -type metadataFileSourceServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetadataFileSourceServiceClient(cc *grpc.ClientConn) MetadataFileSourceServiceClient { - return &metadataFileSourceServiceClient{cc} -} - -func (c *metadataFileSourceServiceClient) RegisterFileSource(ctx context.Context, in *RegisterFileSourceRequest, opts ...grpc.CallOption) (*RegisterFileSourceResponse, error) { - out := new(RegisterFileSourceResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataFileSourceService/RegisterFileSource", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *metadataFileSourceServiceClient) GetFileSourceInfo(ctx context.Context, in *GetFileSourceInfoRequest, opts ...grpc.CallOption) (*GetFileSourceInfoResponse, error) { - out := new(GetFileSourceInfoResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataFileSourceService/GetFileSourceInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metadataFileSourceServiceClient) RemoveFileSource(ctx context.Context, in *RemoveFileSourceRequest, opts ...grpc.CallOption) (*RemoveFileSourceResponse, error) { - out := new(RemoveFileSourceResponse) - err := c.cc.Invoke(ctx, "/px.vizier.services.metadata.MetadataFileSourceService/RemoveFileSource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetadataFileSourceServiceServer is the server API for MetadataFileSourceService service. -type MetadataFileSourceServiceServer interface { - RegisterFileSource(context.Context, *RegisterFileSourceRequest) (*RegisterFileSourceResponse, error) - GetFileSourceInfo(context.Context, *GetFileSourceInfoRequest) (*GetFileSourceInfoResponse, error) - RemoveFileSource(context.Context, *RemoveFileSourceRequest) (*RemoveFileSourceResponse, error) -} - -// UnimplementedMetadataFileSourceServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMetadataFileSourceServiceServer struct { -} - -func (*UnimplementedMetadataFileSourceServiceServer) RegisterFileSource(ctx context.Context, req *RegisterFileSourceRequest) (*RegisterFileSourceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RegisterFileSource not implemented") -} -func (*UnimplementedMetadataFileSourceServiceServer) GetFileSourceInfo(ctx context.Context, req *GetFileSourceInfoRequest) (*GetFileSourceInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetFileSourceInfo not implemented") -} -func (*UnimplementedMetadataFileSourceServiceServer) RemoveFileSource(ctx context.Context, req *RemoveFileSourceRequest) (*RemoveFileSourceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveFileSource not implemented") -} - -func RegisterMetadataFileSourceServiceServer(s *grpc.Server, srv MetadataFileSourceServiceServer) { - s.RegisterService(&_MetadataFileSourceService_serviceDesc, srv) -} - -func _MetadataFileSourceService_RegisterFileSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegisterFileSourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetadataFileSourceServiceServer).RegisterFileSource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataFileSourceService/RegisterFileSource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataFileSourceServiceServer).RegisterFileSource(ctx, req.(*RegisterFileSourceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetadataFileSourceService_GetFileSourceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(GetFileSourceInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetadataFileSourceServiceServer).GetFileSourceInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataFileSourceService/GetFileSourceInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataFileSourceServiceServer).GetFileSourceInfo(ctx, req.(*GetFileSourceInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetadataFileSourceService_RemoveFileSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveFileSourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetadataFileSourceServiceServer).RemoveFileSource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/px.vizier.services.metadata.MetadataFileSourceService/RemoveFileSource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetadataFileSourceServiceServer).RemoveFileSource(ctx, req.(*RemoveFileSourceRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetadataFileSourceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "px.vizier.services.metadata.MetadataFileSourceService", - HandlerType: (*MetadataFileSourceServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "RegisterFileSource", - Handler: _MetadataFileSourceService_RegisterFileSource_Handler, - }, - { - MethodName: "GetFileSourceInfo", - Handler: _MetadataFileSourceService_GetFileSourceInfo_Handler, - }, - { - MethodName: "RemoveFileSource", - Handler: _MetadataFileSourceService_RemoveFileSource_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "src/vizier/services/metadata/metadatapb/service.proto", -} - // 
MetadataTracepointServiceClient is the client API for MetadataTracepointService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. @@ -5909,7 +4974,7 @@ func (m *WithPrefixKeyResponse_KV) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *RegisterFileSourceRequest) Marshal() (dAtA []byte, err error) { +func (m *RegisterTracepointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5919,12 +4984,12 @@ func (m *RegisterFileSourceRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RegisterFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5946,7 +5011,61 @@ func (m *RegisterFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *RegisterFileSourceResponse) Marshal() (dAtA []byte, err error) { +func (m *RegisterTracepointRequest_TracepointRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegisterTracepointRequest_TracepointRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RegisterTracepointRequest_TracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TTL != nil { + { + size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) + if err 
!= nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.TracepointDeployment != nil { + { + size, err := m.TracepointDeployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RegisterTracepointResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5956,12 +5075,12 @@ func (m *RegisterFileSourceResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RegisterFileSourceResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -5978,10 +5097,10 @@ func (m *RegisterFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, err i-- dAtA[i] = 0x12 } - if len(m.FileSources) > 0 { - for iNdEx := len(m.FileSources) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Tracepoints) > 0 { + for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.FileSources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5995,7 +5114,7 @@ func (m *RegisterFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *RegisterFileSourceResponse_FileSourceStatus) Marshal() (dAtA []byte, err error) { +func (m 
*RegisterTracepointResponse_TracepointStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6005,12 +5124,12 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) Marshal() (dAtA []byte, er return dAtA[:n], nil } -func (m *RegisterFileSourceResponse_FileSourceStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse_TracepointStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterFileSourceResponse_FileSourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RegisterTracepointResponse_TracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6049,7 +5168,7 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) MarshalToSizedBuffer(dAtA return len(dAtA) - i, nil } -func (m *GetFileSourceInfoRequest) Marshal() (dAtA []byte, err error) { +func (m *GetTracepointInfoRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6059,12 +5178,12 @@ func (m *GetFileSourceInfoRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetFileSourceInfoRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetTracepointInfoRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetFileSourceInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetTracepointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6086,7 +5205,7 @@ func (m *GetFileSourceInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *GetFileSourceInfoResponse) Marshal() (dAtA []byte, err error) { +func (m *GetTracepointInfoResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, 
size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6096,20 +5215,20 @@ func (m *GetFileSourceInfoResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetFileSourceInfoResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetFileSourceInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.FileSources) > 0 { - for iNdEx := len(m.FileSources) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Tracepoints) > 0 { + for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.FileSources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6123,7 +5242,7 @@ func (m *GetFileSourceInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *GetFileSourceInfoResponse_FileSourceState) Marshal() (dAtA []byte, err error) { +func (m *GetTracepointInfoResponse_TracepointState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6133,12 +5252,12 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Marshal() (dAtA []byte, err return dAtA[:n], nil } -func (m *GetFileSourceInfoResponse_FileSourceState) MarshalTo(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse_TracepointState) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetFileSourceInfoResponse_FileSourceState) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetTracepointInfoResponse_TracepointState) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6198,7 +5317,7 @@ func (m 
*GetFileSourceInfoResponse_FileSourceState) MarshalToSizedBuffer(dAtA [] return len(dAtA) - i, nil } -func (m *RemoveFileSourceRequest) Marshal() (dAtA []byte, err error) { +func (m *RemoveTracepointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6208,12 +5327,12 @@ func (m *RemoveFileSourceRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveFileSourceRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RemoveTracepointRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RemoveTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6230,7 +5349,7 @@ func (m *RemoveFileSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RemoveFileSourceResponse) Marshal() (dAtA []byte, err error) { +func (m *RemoveTracepointResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6240,12 +5359,12 @@ func (m *RemoveFileSourceResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveFileSourceResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *RemoveTracepointResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RemoveFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RemoveTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6265,7 +5384,7 @@ func (m *RemoveFileSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *RegisterTracepointRequest) Marshal() (dAtA []byte, err error) { +func (m *UpdateConfigRequest) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6275,34 +5394,41 @@ func (m *RegisterTracepointRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RegisterTracepointRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UpdateConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Requests) > 0 { - for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if len(m.AgentPodName) > 0 { + i -= len(m.AgentPodName) + copy(dAtA[i:], m.AgentPodName) + i = encodeVarintService(dAtA, i, uint64(len(m.AgentPodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintService(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintService(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RegisterTracepointRequest_TracepointRequest) Marshal() (dAtA []byte, err error) { +func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6312,38 +5438,19 @@ func (m *RegisterTracepointRequest_TracepointRequest) Marshal() (dAtA []byte, er return dAtA[:n], nil } -func (m *RegisterTracepointRequest_TracepointRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { size := 
m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointRequest_TracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *UpdateConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.TTL != nil { - { - size, err := m.TTL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.TracepointDeployment != nil { + if m.Status != nil { { - size, err := m.TracepointDeployment.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6356,7 +5463,7 @@ func (m *RegisterTracepointRequest_TracepointRequest) MarshalToSizedBuffer(dAtA return len(dAtA) - i, nil } -func (m *RegisterTracepointResponse) Marshal() (dAtA []byte, err error) { +func (m *GetScriptsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6366,38 +5473,61 @@ func (m *RegisterTracepointResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RegisterTracepointResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m 
*GetScriptsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Tracepoints) > 0 { - for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *GetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scripts) > 0 { + for k := range m.Scripts { + v := m.Scripts[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintService(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintService(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0xa } @@ -6405,7 +5535,7 @@ func (m *RegisterTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *RegisterTracepointResponse_TracepointStatus) Marshal() (dAtA []byte, err error) { +func (m *AddOrUpdateScriptRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6415,38 +5545,19 @@ func (m *RegisterTracepointResponse_TracepointStatus) Marshal() (dAtA []byte, er return dAtA[:n], nil } -func (m *RegisterTracepointResponse_TracepointStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *AddOrUpdateScriptRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RegisterTracepointResponse_TracepointStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AddOrUpdateScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Status != nil { + if m.Script != nil { { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Script.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6459,7 +5570,7 @@ func (m *RegisterTracepointResponse_TracepointStatus) MarshalToSizedBuffer(dAtA return len(dAtA) - i, nil } -func (m *GetTracepointInfoRequest) Marshal() (dAtA []byte, err error) { +func (m *AddOrUpdateScriptResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6469,34 +5580,20 @@ func (m *GetTracepointInfoRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTracepointInfoRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *AddOrUpdateScriptResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AddOrUpdateScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.IDs) > 0 { - for iNdEx := len(m.IDs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IDs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *GetTracepointInfoResponse) Marshal() (dAtA []byte, err error) { +func 
(m *DeleteScriptRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6506,34 +5603,32 @@ func (m *GetTracepointInfoResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTracepointInfoResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *DeleteScriptRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeleteScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Tracepoints) > 0 { - for iNdEx := len(m.Tracepoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tracepoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + if m.ScriptID != nil { + { + size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTracepointInfoResponse_TracepointState) Marshal() (dAtA []byte, err error) { +func (m *DeleteScriptResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6543,72 +5638,69 @@ func (m *GetTracepointInfoResponse_TracepointState) Marshal() (dAtA []byte, err return dAtA[:n], nil } -func (m *GetTracepointInfoResponse_TracepointState) MarshalTo(dAtA []byte) (int, error) { +func (m *DeleteScriptResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetTracepointInfoResponse_TracepointState) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *DeleteScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = 
i var l int _ = l - if len(m.SchemaNames) > 0 { - for iNdEx := len(m.SchemaNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SchemaNames[iNdEx]) - copy(dAtA[i:], m.SchemaNames[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.SchemaNames[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.ExpectedState != 0 { - i = encodeVarintService(dAtA, i, uint64(m.ExpectedState)) - i-- - dAtA[i] = 0x28 + return len(dAtA) - i, nil +} + +func (m *SetScriptsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + return dAtA[:n], nil +} + +func (m *SetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scripts) > 0 { + for k := range m.Scripts { + v := m.Scripts[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintService(dAtA, i, uint64(len(k))) i-- - dAtA[i] = 0x1a - } - } - if m.State != 0 { - i = encodeVarintService(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i = 
encodeVarintService(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RemoveTracepointRequest) Marshal() (dAtA []byte, err error) { +func (m *SetScriptsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6618,29 +5710,20 @@ func (m *RemoveTracepointRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveTracepointRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *SetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RemoveTracepointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *RemoveTracepointResponse) Marshal() (dAtA []byte, err error) { +func (m *ExecutionStats) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6650,32 +5733,40 @@ func (m *RemoveTracepointResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveTracepointResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *ExecutionStats) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RemoveTracepointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= 
size - i = encodeVarintService(dAtA, i, uint64(size)) - } + if m.RecordsProcessed != 0 { + i = encodeVarintService(dAtA, i, uint64(m.RecordsProcessed)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x20 + } + if m.BytesProcessed != 0 { + i = encodeVarintService(dAtA, i, uint64(m.BytesProcessed)) + i-- + dAtA[i] = 0x18 + } + if m.CompilationTimeNs != 0 { + i = encodeVarintService(dAtA, i, uint64(m.CompilationTimeNs)) + i-- + dAtA[i] = 0x10 + } + if m.ExecutionTimeNs != 0 { + i = encodeVarintService(dAtA, i, uint64(m.ExecutionTimeNs)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { +func (m *RecordExecutionResultRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6685,63 +5776,83 @@ func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RecordExecutionResultRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UpdateConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RecordExecutionResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.AgentPodName) > 0 { - i -= len(m.AgentPodName) - copy(dAtA[i:], m.AgentPodName) - i = encodeVarintService(dAtA, i, uint64(len(m.AgentPodName))) - i-- - dAtA[i] = 0x1a + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintService(dAtA, i, uint64(len(m.Value))) + if m.Timestamp != nil { + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- dAtA[i] = 
0x12 } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintService(dAtA, i, uint64(len(m.Key))) + if m.ScriptID != nil { + { + size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { +func (m *RecordExecutionResultRequest_Error) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *RecordExecutionResultRequest_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *RecordExecutionResultRequest_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UpdateConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RecordExecutionResultRequest_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if m.Status != nil { + if m.ExecutionStats != nil { { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ExecutionStats.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6749,12 +5860,11 @@ func (m *UpdateConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintService(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x22 } return len(dAtA) - i, nil } - -func (m *GetScriptsRequest) Marshal() (dAtA 
[]byte, err error) { +func (m *RecordExecutionResultResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6764,12 +5874,12 @@ func (m *GetScriptsRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *RecordExecutionResultResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RecordExecutionResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -6777,7 +5887,7 @@ func (m *GetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *GetScriptsResponse) Marshal() (dAtA []byte, err error) { +func (m *GetAllExecutionResultsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6787,46 +5897,20 @@ func (m *GetScriptsResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Scripts) > 0 { - for k := range m.Scripts { - v := m.Scripts[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintService(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = 
encodeVarintService(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *AddOrUpdateScriptRequest) Marshal() (dAtA []byte, err error) { +func (m *GetAllExecutionResultsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6836,154 +5920,26 @@ func (m *AddOrUpdateScriptRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AddOrUpdateScriptRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AddOrUpdateScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Script != nil { - { - size, err := m.Script.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AddOrUpdateScriptResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddOrUpdateScriptResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AddOrUpdateScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *DeleteScriptRequest) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteScriptRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteScriptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ScriptID != nil { - { - size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteScriptResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteScriptResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteScriptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *SetScriptsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetScriptsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Scripts) > 0 { - for k := range m.Scripts { - v := m.Scripts[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - 
copy(dAtA[i:], k) - i = encodeVarintService(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintService(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0xa } @@ -6991,73 +5947,7 @@ func (m *SetScriptsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SetScriptsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetScriptsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SetScriptsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ExecutionStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecutionStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RecordsProcessed != 0 { - i = encodeVarintService(dAtA, i, uint64(m.RecordsProcessed)) - i-- - dAtA[i] = 0x20 - } - if m.BytesProcessed != 0 { - i = encodeVarintService(dAtA, i, uint64(m.BytesProcessed)) - i-- - dAtA[i] = 0x18 - } - if m.CompilationTimeNs != 0 { - i = encodeVarintService(dAtA, i, uint64(m.CompilationTimeNs)) - i-- - dAtA[i] = 0x10 - } - if m.ExecutionTimeNs != 0 { - i = encodeVarintService(dAtA, i, uint64(m.ExecutionTimeNs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *RecordExecutionResultRequest) Marshal() (dAtA []byte, err error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA 
= make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7067,12 +5957,12 @@ func (m *RecordExecutionResultRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RecordExecutionResultRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -7113,12 +6003,12 @@ func (m *RecordExecutionResultRequest) MarshalToSizedBuffer(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *RecordExecutionResultRequest_Error) MarshalTo(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultRequest_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Error != nil { { @@ -7134,12 +6024,12 @@ func (m *RecordExecutionResultRequest_Error) MarshalToSizedBuffer(dAtA []byte) ( } return len(dAtA) - i, nil } -func (m *RecordExecutionResultRequest_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RecordExecutionResultRequest_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.ExecutionStats != nil { { @@ -7155,218 +6045,37 @@ func (m 
*RecordExecutionResultRequest_ExecutionStats) MarshalToSizedBuffer(dAtA } return len(dAtA) - i, nil } -func (m *RecordExecutionResultResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base } - -func (m *RecordExecutionResultResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *SchemaRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n } -func (m *RecordExecutionResultResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *SchemaResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - return len(dAtA) - i, nil -} - -func (m *GetAllExecutionResultsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllExecutionResultsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllExecutionResultsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *GetAllExecutionResultsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllExecutionResultsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} 
- -func (m *GetAllExecutionResultsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Result != nil { - { - size := m.Result.Size() - i -= size - if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Timestamp != nil { - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.ScriptID != nil { - { - size, err := m.ScriptID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Error != nil { - { - size, err := 
m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ExecutionStats != nil { - { - size, err := m.ExecutionStats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func encodeVarintService(dAtA []byte, offset int, v uint64) int { - offset -= sovService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SchemaRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *SchemaResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Schema != nil { - l = m.Schema.Size() - n += 1 + l + sovService(uint64(l)) - } - return n + if m.Schema != nil { + l = m.Schema.Size() + n += 1 + l + sovService(uint64(l)) + } + return n } func (m *AgentInfoRequest) Size() (n int) { @@ -7555,7 +6264,7 @@ func (m *WithPrefixKeyResponse_KV) Size() (n int) { return n } -func (m *RegisterFileSourceRequest) Size() (n int) { +func (m *RegisterTracepointRequest) Size() (n int) { if m == nil { return 0 } @@ -7570,14 +6279,35 @@ func (m *RegisterFileSourceRequest) Size() (n int) { return n } -func (m *RegisterFileSourceResponse) Size() (n int) { +func (m *RegisterTracepointRequest_TracepointRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TracepointDeployment != nil { + l = 
m.TracepointDeployment.Size() + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if m.TTL != nil { + l = m.TTL.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *RegisterTracepointResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.FileSources) > 0 { - for _, e := range m.FileSources { + if len(m.Tracepoints) > 0 { + for _, e := range m.Tracepoints { l = e.Size() n += 1 + l + sovService(uint64(l)) } @@ -7589,7 +6319,7 @@ func (m *RegisterFileSourceResponse) Size() (n int) { return n } -func (m *RegisterFileSourceResponse_FileSourceStatus) Size() (n int) { +func (m *RegisterTracepointResponse_TracepointStatus) Size() (n int) { if m == nil { return 0 } @@ -7610,7 +6340,7 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) Size() (n int) { return n } -func (m *GetFileSourceInfoRequest) Size() (n int) { +func (m *GetTracepointInfoRequest) Size() (n int) { if m == nil { return 0 } @@ -7625,14 +6355,14 @@ func (m *GetFileSourceInfoRequest) Size() (n int) { return n } -func (m *GetFileSourceInfoResponse) Size() (n int) { +func (m *GetTracepointInfoResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.FileSources) > 0 { - for _, e := range m.FileSources { + if len(m.Tracepoints) > 0 { + for _, e := range m.Tracepoints { l = e.Size() n += 1 + l + sovService(uint64(l)) } @@ -7640,7 +6370,7 @@ func (m *GetFileSourceInfoResponse) Size() (n int) { return n } -func (m *GetFileSourceInfoResponse_FileSourceState) Size() (n int) { +func (m *GetTracepointInfoResponse_TracepointState) Size() (n int) { if m == nil { return 0 } @@ -7675,7 +6405,7 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Size() (n int) { return n } -func (m *RemoveFileSourceRequest) Size() (n int) { +func (m *RemoveTracepointRequest) Size() (n int) { if m == nil { return 0 } @@ -7690,7 +6420,7 @@ func (m *RemoveFileSourceRequest) Size() (n int) { return n } -func 
(m *RemoveFileSourceResponse) Size() (n int) { +func (m *RemoveTracepointResponse) Size() (n int) { if m == nil { return 0 } @@ -7703,54 +6433,33 @@ func (m *RemoveFileSourceResponse) Size() (n int) { return n } -func (m *RegisterTracepointRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Requests) > 0 { - for _, e := range m.Requests { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *RegisterTracepointRequest_TracepointRequest) Size() (n int) { +func (m *UpdateConfigRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.TracepointDeployment != nil { - l = m.TracepointDeployment.Size() + l = len(m.Key) + if l > 0 { n += 1 + l + sovService(uint64(l)) } - l = len(m.Name) + l = len(m.Value) if l > 0 { n += 1 + l + sovService(uint64(l)) } - if m.TTL != nil { - l = m.TTL.Size() + l = len(m.AgentPodName) + if l > 0 { n += 1 + l + sovService(uint64(l)) } return n } -func (m *RegisterTracepointResponse) Size() (n int) { +func (m *UpdateConfigResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Tracepoints) > 0 { - for _, e := range m.Tracepoints { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } if m.Status != nil { l = m.Status.Size() n += 1 + l + sovService(uint64(l)) @@ -7758,155 +6467,7 @@ func (m *RegisterTracepointResponse) Size() (n int) { return n } -func (m *RegisterTracepointResponse_TracepointStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *GetTracepointInfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.IDs) > 0 { - for _, e := range m.IDs { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return 
n -} - -func (m *GetTracepointInfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Tracepoints) > 0 { - for _, e := range m.Tracepoints { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *GetTracepointInfoResponse_TracepointState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.State != 0 { - n += 1 + sovService(uint64(m.State)) - } - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.ExpectedState != 0 { - n += 1 + sovService(uint64(m.ExpectedState)) - } - if len(m.SchemaNames) > 0 { - for _, s := range m.SchemaNames { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *RemoveTracepointRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *RemoveTracepointResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *UpdateConfigRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.AgentPodName) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *UpdateConfigResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *GetScriptsRequest) Size() (n int) { +func (m *GetScriptsRequest) Size() (n int) { if 
m == nil { return 0 } @@ -8328,119 +6889,6 @@ func (this *WithPrefixKeyResponse_KV) String() string { }, "") return s } -func (this *RegisterFileSourceRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForRequests := "[]*FileSourceDeployment{" - for _, f := range this.Requests { - repeatedStringForRequests += strings.Replace(fmt.Sprintf("%v", f), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + "," - } - repeatedStringForRequests += "}" - s := strings.Join([]string{`&RegisterFileSourceRequest{`, - `Requests:` + repeatedStringForRequests + `,`, - `}`, - }, "") - return s -} -func (this *RegisterFileSourceResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForFileSources := "[]*RegisterFileSourceResponse_FileSourceStatus{" - for _, f := range this.FileSources { - repeatedStringForFileSources += strings.Replace(fmt.Sprintf("%v", f), "RegisterFileSourceResponse_FileSourceStatus", "RegisterFileSourceResponse_FileSourceStatus", 1) + "," - } - repeatedStringForFileSources += "}" - s := strings.Join([]string{`&RegisterFileSourceResponse{`, - `FileSources:` + repeatedStringForFileSources + `,`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RegisterFileSourceResponse_FileSourceStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RegisterFileSourceResponse_FileSourceStatus{`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *GetFileSourceInfoRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForIDs := "[]*UUID{" - for _, f := range this.IDs { - repeatedStringForIDs += strings.Replace(fmt.Sprintf("%v", f), "UUID", 
"uuidpb.UUID", 1) + "," - } - repeatedStringForIDs += "}" - s := strings.Join([]string{`&GetFileSourceInfoRequest{`, - `IDs:` + repeatedStringForIDs + `,`, - `}`, - }, "") - return s -} -func (this *GetFileSourceInfoResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForFileSources := "[]*GetFileSourceInfoResponse_FileSourceState{" - for _, f := range this.FileSources { - repeatedStringForFileSources += strings.Replace(fmt.Sprintf("%v", f), "GetFileSourceInfoResponse_FileSourceState", "GetFileSourceInfoResponse_FileSourceState", 1) + "," - } - repeatedStringForFileSources += "}" - s := strings.Join([]string{`&GetFileSourceInfoResponse{`, - `FileSources:` + repeatedStringForFileSources + `,`, - `}`, - }, "") - return s -} -func (this *GetFileSourceInfoResponse_FileSourceState) String() string { - if this == nil { - return "nil" - } - repeatedStringForStatuses := "[]*Status{" - for _, f := range this.Statuses { - repeatedStringForStatuses += strings.Replace(fmt.Sprintf("%v", f), "Status", "statuspb.Status", 1) + "," - } - repeatedStringForStatuses += "}" - s := strings.Join([]string{`&GetFileSourceInfoResponse_FileSourceState{`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `Statuses:` + repeatedStringForStatuses + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ExpectedState:` + fmt.Sprintf("%v", this.ExpectedState) + `,`, - `SchemaNames:` + fmt.Sprintf("%v", this.SchemaNames) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveFileSourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveFileSourceRequest{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveFileSourceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveFileSourceResponse{`, - `Status:` + 
strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s -} func (this *RegisterTracepointRequest) String() string { if this == nil { return "nil" @@ -8750,747 +7198,87 @@ func (this *GetAllExecutionResultsRequest) String() string { func (this *GetAllExecutionResultsResponse) String() string { if this == nil { return "nil" - } - repeatedStringForResults := "[]*GetAllExecutionResultsResponse_ExecutionResult{" - for _, f := range this.Results { - repeatedStringForResults += strings.Replace(fmt.Sprintf("%v", f), "GetAllExecutionResultsResponse_ExecutionResult", "GetAllExecutionResultsResponse_ExecutionResult", 1) + "," - } - repeatedStringForResults += "}" - s := strings.Join([]string{`&GetAllExecutionResultsResponse{`, - `Results:` + repeatedStringForResults + `,`, - `}`, - }, "") - return s -} -func (this *GetAllExecutionResultsResponse_ExecutionResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult{`, - `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, - `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1) + `,`, - `Result:` + fmt.Sprintf("%v", this.Result) + `,`, - `}`, - }, "") - return s -} -func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_Error{`, - `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Status", "statuspb.Status", 1) + `,`, - `}`, - }, "") - return s -} -func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats{`, - `ExecutionStats:` + strings.Replace(fmt.Sprintf("%v", this.ExecutionStats), 
"ExecutionStats", "ExecutionStats", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringService(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *SchemaRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SchemaResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SchemaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &schemapb.Schema{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentInfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, &AgentMetadata{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Agent == nil { - m.Agent = &agentpb.Agent{} - } - if err := m.Agent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &agentpb.AgentStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CarnotInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CarnotInfo == nil { - m.CarnotInfo = &distributedpb.CarnotInfo{} - } - if err := m.CarnotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentUpdatesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentUpdatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdateInterval", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUpdateInterval == nil { - m.MaxUpdateInterval = &types.Duration{} - } - if err := m.MaxUpdateInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdatesPerResponse", wireType) - } - m.MaxUpdatesPerResponse = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxUpdatesPerResponse |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AgentID == nil { - m.AgentID = &uuidpb.UUID{} - } - if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Update = &AgentUpdate_Deleted{b} - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &agentpb.Agent{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Update = &AgentUpdate_Agent{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataInfo", wireType) - } - var msglen int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService + } + repeatedStringForResults := "[]*GetAllExecutionResultsResponse_ExecutionResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(fmt.Sprintf("%v", f), "GetAllExecutionResultsResponse_ExecutionResult", "GetAllExecutionResultsResponse_ExecutionResult", 1) + "," + } + repeatedStringForResults += "}" + s := strings.Join([]string{`&GetAllExecutionResultsResponse{`, + `Results:` + repeatedStringForResults + `,`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult{`, + `ScriptID:` + strings.Replace(fmt.Sprintf("%v", this.ScriptID), "UUID", "uuidpb.UUID", 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1) + `,`, + `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_Error) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_Error{`, + `Error:` + strings.Replace(fmt.Sprintf("%v", this.Error), "Status", "statuspb.Status", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetAllExecutionResultsResponse_ExecutionResult_ExecutionStats{`, + `ExecutionStats:` + strings.Replace(fmt.Sprintf("%v", this.ExecutionStats), 
"ExecutionStats", "ExecutionStats", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringService(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SchemaRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - v := &messagespb.AgentDataInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Update = &AgentUpdate_DataInfo{v} - iNdEx = postIndex + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -9512,7 +7300,7 @@ func (m *AgentUpdate) Unmarshal(dAtA []byte) error { } return nil } -func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { +func (m *SchemaResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9535,49 +7323,15 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AgentUpdatesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AgentUpdatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentUpdates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AgentUpdates = append(m.AgentUpdates, &AgentUpdate{}) - if err := m.AgentUpdates[len(m.AgentUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9604,51 +7358,13 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AgentSchemas = append(m.AgentSchemas, &distributedpb.SchemaInfo{}) - if err := m.AgentSchemas[len(m.AgentSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Schema == nil { + m.Schema = &schemapb.Schema{} + } + if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemasUpdated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AgentSchemasUpdated = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndOfVersion", wireType) - 
} - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndOfVersion = bool(v != 0) default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -9670,99 +7386,35 @@ func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { +func (m *AgentInfoRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WithPrefixKeyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WithPrefixKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Proto = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AgentInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AgentInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -9784,7 +7436,7 @@ func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { +func (m *AgentInfoResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9807,15 +7459,15 @@ func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WithPrefixKeyResponse: wiretype end group for non-group") + return fmt.Errorf("proto: AgentInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WithPrefixKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch 
fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9842,8 +7494,8 @@ func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kvs = append(m.Kvs, &WithPrefixKeyResponse_KV{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Info = append(m.Info, &AgentMetadata{}) + if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9868,7 +7520,7 @@ func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { +func (m *AgentMetadata) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9891,17 +7543,17 @@ func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KV: wiretype end group for non-group") + return fmt.Errorf("proto: AgentMetadata: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentMetadata: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -9911,29 +7563,33 @@ func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - 
if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + if m.Agent == nil { + m.Agent = &agentpb.Agent{} + } + if err := m.Agent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -9943,79 +7599,31 @@ func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} + if m.Status == nil { + m.Status = &agentpb.AgentStatus{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RegisterFileSourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RegisterFileSourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RegisterFileSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CarnotInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10042,8 +7650,10 @@ func (m *RegisterFileSourceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Requests = append(m.Requests, &ir.FileSourceDeployment{}) - if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.CarnotInfo == nil { + m.CarnotInfo = &distributedpb.CarnotInfo{} + } + if err := m.CarnotInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ 
-10068,7 +7678,7 @@ func (m *RegisterFileSourceRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *RegisterFileSourceResponse) Unmarshal(dAtA []byte) error { +func (m *AgentUpdatesRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10091,15 +7701,15 @@ func (m *RegisterFileSourceResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RegisterFileSourceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: AgentUpdatesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RegisterFileSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentUpdatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdateInterval", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10126,16 +7736,18 @@ func (m *RegisterFileSourceResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FileSources = append(m.FileSources, &RegisterFileSourceResponse_FileSourceStatus{}) - if err := m.FileSources[len(m.FileSources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MaxUpdateInterval == nil { + m.MaxUpdateInterval = &types.Duration{} + } + if err := m.MaxUpdateInterval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdatesPerResponse", wireType) } - var msglen int + m.MaxUpdatesPerResponse = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10145,28 +7757,11 
@@ func (m *RegisterFileSourceResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.MaxUpdatesPerResponse |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &statuspb.Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipService(dAtA[iNdEx:]) @@ -10188,7 +7783,7 @@ func (m *RegisterFileSourceResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *RegisterFileSourceResponse_FileSourceStatus) Unmarshal(dAtA []byte) error { +func (m *AgentUpdate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10211,15 +7806,15 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) Unmarshal(dAtA []byte) err fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FileSourceStatus: wiretype end group for non-group") + return fmt.Errorf("proto: AgentUpdate: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FileSourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentUpdate: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10246,18 +7841,18 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) Unmarshal(dAtA []byte) err if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &statuspb.Status{} + if m.AgentID == nil { + m.AgentID = &uuidpb.UUID{} } - if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10267,33 +7862,18 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) Unmarshal(dAtA []byte) err } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.Update = &AgentUpdate_Deleted{b} case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Agent", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10303,77 +7883,30 @@ func (m *RegisterFileSourceResponse_FileSourceStatus) Unmarshal(dAtA []byte) err } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, 
err := skipService(dAtA[iNdEx:]) - if err != nil { + v := &agentpb.Agent{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetFileSourceInfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetFileSourceInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetFileSourceInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Update = &AgentUpdate_Agent{v} + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IDs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DataInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10400,10 +7933,11 @@ func (m *GetFileSourceInfoRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IDs = append(m.IDs, &uuidpb.UUID{}) - if err := m.IDs[len(m.IDs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &messagespb.AgentDataInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Update = &AgentUpdate_DataInfo{v} iNdEx = postIndex default: iNdEx = preIndex @@ -10426,7 +7960,7 @@ func (m *GetFileSourceInfoRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetFileSourceInfoResponse) 
Unmarshal(dAtA []byte) error { +func (m *AgentUpdatesResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10449,15 +7983,15 @@ func (m *GetFileSourceInfoResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFileSourceInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: AgentUpdatesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFileSourceInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentUpdatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentUpdates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10484,64 +8018,14 @@ func (m *GetFileSourceInfoResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FileSources = append(m.FileSources, &GetFileSourceInfoResponse_FileSourceState{}) - if err := m.FileSources[len(m.FileSources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AgentUpdates = append(m.AgentUpdates, &AgentUpdate{}) + if err := m.AgentUpdates[len(m.AgentUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileSourceState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileSourceState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10568,18 +8052,16 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AgentSchemas = append(m.AgentSchemas, &distributedpb.SchemaInfo{}) + if err := m.AgentSchemas[len(m.AgentSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AgentSchemasUpdated", wireType) } - m.State = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10589,16 +8071,17 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - m.State |= statuspb.LifeCycleState(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + m.AgentSchemasUpdated = bool(v != 0) + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field EndOfVersion", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10608,29 +8091,65 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthService + m.EndOfVersion = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthService } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Statuses = append(m.Statuses, &statuspb.Status{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WithPrefixKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService } - iNdEx = postIndex - case 4: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WithPrefixKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WithPrefixKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := 
uint(0); ; shift += 7 { @@ -10658,30 +8177,11 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedState", wireType) - } - m.ExpectedState = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpectedState |= statuspb.LifeCycleState(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10709,7 +8209,7 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - m.SchemaNames = append(m.SchemaNames, string(dAtA[iNdEx:postIndex])) + m.Proto = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10732,7 +8232,7 @@ func (m *GetFileSourceInfoResponse_FileSourceState) Unmarshal(dAtA []byte) error } return nil } -func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { +func (m *WithPrefixKeyResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10755,17 +8255,17 @@ func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveFileSourceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WithPrefixKeyResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveFileSourceRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) + return fmt.Errorf("proto: WithPrefixKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10775,23 +8275,25 @@ func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.Kvs = append(m.Kvs, &WithPrefixKeyResponse_KV{}) + if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10814,7 +8316,7 @@ func (m *RemoveFileSourceRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *RemoveFileSourceResponse) Unmarshal(dAtA []byte) error { +func (m *WithPrefixKeyResponse_KV) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10837,17 +8339,17 @@ func (m *RemoveFileSourceResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveFileSourceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: KV: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveFileSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum 
{ case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowService @@ -10857,26 +8359,56 @@ func (m *RemoveFileSourceResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthService } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthService } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &statuspb.Status{} + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} } iNdEx = postIndex default: diff --git a/src/vizier/services/metadata/metadatapb/service.proto b/src/vizier/services/metadata/metadatapb/service.proto index 1b5dd699660..7a184c73d15 100644 --- a/src/vizier/services/metadata/metadatapb/service.proto +++ b/src/vizier/services/metadata/metadatapb/service.proto @@ -28,7 +28,6 @@ import "google/protobuf/timestamp.proto"; import "src/api/proto/uuidpb/uuid.proto"; import "src/carnot/planner/distributedpb/distributed_plan.proto"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; -import "src/carnot/planner/file_source/ir/logical.proto"; import "src/common/base/statuspb/status.proto"; import "src/table_store/schemapb/schema.proto"; import "src/vizier/messages/messagespb/messages.proto"; @@ -46,12 +45,6 @@ service MetadataService { rpc GetWithPrefixKey(WithPrefixKeyRequest) returns (WithPrefixKeyResponse); } -service MetadataFileSourceService { - rpc RegisterFileSource(RegisterFileSourceRequest) returns (RegisterFileSourceResponse); - rpc GetFileSourceInfo(GetFileSourceInfoRequest) returns (GetFileSourceInfoResponse); - rpc RemoveFileSource(RemoveFileSourceRequest) returns (RemoveFileSourceResponse); -} - service MetadataTracepointService { rpc RegisterTracepoint(RegisterTracepointRequest) returns (RegisterTracepointResponse); rpc GetTracepointInfo(GetTracepointInfoRequest) returns (GetTracepointInfoResponse); @@ -169,63 +162,6 @@ message WithPrefixKeyResponse { repeated KV kvs = 1; } -message RegisterFileSourceRequest { - repeated px.carnot.planner.file_source.ir.FileSourceDeployment requests = 1; -} - -// The response to a RegisterFileSourceRequest. -message RegisterFileSourceResponse { - message FileSourceStatus { - px.statuspb.Status status = 1; // TODO(ddelnano): Is this necessary? - // The ID of the file source. This should be the user-specified name for the file source . 
- uuidpb.UUID id = 2 [ (gogoproto.customname) = "ID" ]; - string name = 3; - } - repeated FileSourceStatus file_sources = 1; - // Overall status of whether file source registration requests were initiated with/without - // errors. - px.statuspb.Status status = 2; -} - -// The request to check the status for a file source with the given names. -message GetFileSourceInfoRequest { - // The file source IDs to get the info for. If empty, fetches the info for all known file source - // s. - repeated uuidpb.UUID ids = 1 [ (gogoproto.customname) = "IDs" ]; -} - -// The status of whether the file source has successfully registered or not. -message GetFileSourceInfoResponse { - message FileSourceState { - // The file source ID. - uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; - // The state of the file source . - px.statuspb.LifeCycleState state = 2; - // The status of the file source, specified if the state of the file source is not healthy. - repeated px.statuspb.Status statuses = 3; - string name = 4; - // The desired state for the file source . This can be used to determine whether - // the file source is just starting up or in the process of terminating. - px.statuspb.LifeCycleState expected_state = 5; - repeated string schema_names = 6; - } - // List of file source states. - repeated FileSourceState file_sources = 1; -} - -// The request to evict a file source . This will normally happen via the file source 's TTL, but -// can be initiated via request as well. -message RemoveFileSourceRequest { - // The name of the file source to remove. - repeated string names = 1; -} - -// The response to the file source removal. -message RemoveFileSourceResponse { - // Status of whether the file source removal request was initiated with/without errors. - px.statuspb.Status status = 1; -} - // The request to register tracepoints on all PEMs. 
message RegisterTracepointRequest { message TracepointRequest { diff --git a/src/vizier/services/metadata/storepb/BUILD.bazel b/src/vizier/services/metadata/storepb/BUILD.bazel index f0a1ba5db8d..c2a677a2c8f 100644 --- a/src/vizier/services/metadata/storepb/BUILD.bazel +++ b/src/vizier/services/metadata/storepb/BUILD.bazel @@ -23,7 +23,6 @@ pl_proto_library( deps = [ "//src/api/proto/uuidpb:uuid_pl_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_proto", - "//src/carnot/planner/file_source/ir:logical_pl_proto", "//src/common/base/statuspb:status_pl_proto", "//src/shared/k8s/metadatapb:metadata_pl_proto", "//src/shared/types/typespb:types_pl_proto", @@ -38,7 +37,6 @@ pl_cc_proto_library( deps = [ "//src/api/proto/uuidpb:uuid_pl_cc_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_cc_proto", - "//src/carnot/planner/file_source/ir:logical_pl_cc_proto", "//src/common/base/statuspb:status_pl_cc_proto", "//src/shared/k8s/metadatapb:metadata_pl_cc_proto", "//src/shared/types/typespb/wrapper:cc_library", @@ -54,7 +52,6 @@ pl_go_proto_library( deps = [ "//src/api/proto/uuidpb:uuid_pl_go_proto", "//src/carnot/planner/dynamic_tracing/ir/logicalpb:logical_pl_go_proto", - "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/common/base/statuspb:status_pl_go_proto", "//src/shared/k8s/metadatapb:metadata_pl_go_proto", "//src/shared/types/typespb:types_pl_go_proto", diff --git a/src/vizier/services/metadata/storepb/store.pb.go b/src/vizier/services/metadata/storepb/store.pb.go index 374e966bab0..17bd3150f69 100755 --- a/src/vizier/services/metadata/storepb/store.pb.go +++ b/src/vizier/services/metadata/storepb/store.pb.go @@ -14,7 +14,6 @@ import ( math_bits "math/bits" uuidpb "px.dev/pixie/src/api/proto/uuidpb" logicalpb "px.dev/pixie/src/carnot/planner/dynamic_tracing/ir/logicalpb" - ir "px.dev/pixie/src/carnot/planner/file_source/ir" statuspb "px.dev/pixie/src/common/base/statuspb" metadatapb 
"px.dev/pixie/src/shared/k8s/metadatapb" typespb "px.dev/pixie/src/shared/types/typespb" @@ -100,73 +99,6 @@ func (m *TracepointInfo) GetExpectedState() statuspb.LifeCycleState { return statuspb.UNKNOWN_STATE } -type FileSourceInfo struct { - ID *uuidpb.UUID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - FileSource *ir.FileSourceDeployment `protobuf:"bytes,2,opt,name=file_source,json=fileSource,proto3" json:"file_source,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - ExpectedState statuspb.LifeCycleState `protobuf:"varint,4,opt,name=expected_state,json=expectedState,proto3,enum=px.statuspb.LifeCycleState" json:"expected_state,omitempty"` -} - -func (m *FileSourceInfo) Reset() { *m = FileSourceInfo{} } -func (*FileSourceInfo) ProtoMessage() {} -func (*FileSourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{1} -} -func (m *FileSourceInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileSourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FileSourceInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FileSourceInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileSourceInfo.Merge(m, src) -} -func (m *FileSourceInfo) XXX_Size() int { - return m.Size() -} -func (m *FileSourceInfo) XXX_DiscardUnknown() { - xxx_messageInfo_FileSourceInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_FileSourceInfo proto.InternalMessageInfo - -func (m *FileSourceInfo) GetID() *uuidpb.UUID { - if m != nil { - return m.ID - } - return nil -} - -func (m *FileSourceInfo) GetFileSource() *ir.FileSourceDeployment { - if m != nil { - return m.FileSource - } - return nil -} - -func (m *FileSourceInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m 
*FileSourceInfo) GetExpectedState() statuspb.LifeCycleState { - if m != nil { - return m.ExpectedState - } - return statuspb.UNKNOWN_STATE -} - type AgentTracepointStatus struct { State statuspb.LifeCycleState `protobuf:"varint,1,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` @@ -177,7 +109,7 @@ type AgentTracepointStatus struct { func (m *AgentTracepointStatus) Reset() { *m = AgentTracepointStatus{} } func (*AgentTracepointStatus) ProtoMessage() {} func (*AgentTracepointStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{2} + return fileDescriptor_27ea71ea705227d1, []int{1} } func (m *AgentTracepointStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -234,73 +166,6 @@ func (m *AgentTracepointStatus) GetAgentID() *uuidpb.UUID { return nil } -type AgentFileSourceStatus struct { - State statuspb.LifeCycleState `protobuf:"varint,1,opt,name=state,proto3,enum=px.statuspb.LifeCycleState" json:"state,omitempty"` - Status *statuspb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - ID *uuidpb.UUID `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` - AgentID *uuidpb.UUID `protobuf:"bytes,4,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` -} - -func (m *AgentFileSourceStatus) Reset() { *m = AgentFileSourceStatus{} } -func (*AgentFileSourceStatus) ProtoMessage() {} -func (*AgentFileSourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{3} -} -func (m *AgentFileSourceStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AgentFileSourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AgentFileSourceStatus.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err 
- } - return b[:n], nil - } -} -func (m *AgentFileSourceStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_AgentFileSourceStatus.Merge(m, src) -} -func (m *AgentFileSourceStatus) XXX_Size() int { - return m.Size() -} -func (m *AgentFileSourceStatus) XXX_DiscardUnknown() { - xxx_messageInfo_AgentFileSourceStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_AgentFileSourceStatus proto.InternalMessageInfo - -func (m *AgentFileSourceStatus) GetState() statuspb.LifeCycleState { - if m != nil { - return m.State - } - return statuspb.UNKNOWN_STATE -} - -func (m *AgentFileSourceStatus) GetStatus() *statuspb.Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *AgentFileSourceStatus) GetID() *uuidpb.UUID { - if m != nil { - return m.ID - } - return nil -} - -func (m *AgentFileSourceStatus) GetAgentID() *uuidpb.UUID { - if m != nil { - return m.AgentID - } - return nil -} - type TableInfo struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Desc string `protobuf:"bytes,7,opt,name=desc,proto3" json:"desc,omitempty"` @@ -309,13 +174,12 @@ type TableInfo struct { Columns []*TableInfo_ColumnInfo `protobuf:"bytes,4,rep,name=columns,proto3" json:"columns,omitempty"` Tabletized bool `protobuf:"varint,5,opt,name=tabletized,proto3" json:"tabletized,omitempty"` TabletizationKey string `protobuf:"bytes,6,opt,name=tabletization_key,json=tabletizationKey,proto3" json:"tabletization_key,omitempty"` - MutationId string `protobuf:"bytes,8,opt,name=mutation_id,json=mutationId,proto3" json:"mutation_id,omitempty"` } func (m *TableInfo) Reset() { *m = TableInfo{} } func (*TableInfo) ProtoMessage() {} func (*TableInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{4} + return fileDescriptor_27ea71ea705227d1, []int{2} } func (m *TableInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -393,13 +257,6 @@ func (m *TableInfo) GetTabletizationKey() string { return "" } -func (m *TableInfo) 
GetMutationId() string { - if m != nil { - return m.MutationId - } - return "" -} - type TableInfo_ColumnInfo struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` DataType typespb.DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=px.types.DataType" json:"data_type,omitempty"` @@ -411,7 +268,7 @@ type TableInfo_ColumnInfo struct { func (m *TableInfo_ColumnInfo) Reset() { *m = TableInfo_ColumnInfo{} } func (*TableInfo_ColumnInfo) ProtoMessage() {} func (*TableInfo_ColumnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{4, 0} + return fileDescriptor_27ea71ea705227d1, []int{2, 0} } func (m *TableInfo_ColumnInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +340,7 @@ type ComputedSchema struct { func (m *ComputedSchema) Reset() { *m = ComputedSchema{} } func (*ComputedSchema) ProtoMessage() {} func (*ComputedSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{5} + return fileDescriptor_27ea71ea705227d1, []int{3} } func (m *ComputedSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -533,7 +390,7 @@ type ComputedSchema_AgentIDs struct { func (m *ComputedSchema_AgentIDs) Reset() { *m = ComputedSchema_AgentIDs{} } func (*ComputedSchema_AgentIDs) ProtoMessage() {} func (*ComputedSchema_AgentIDs) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{5, 0} + return fileDescriptor_27ea71ea705227d1, []int{3, 0} } func (m *ComputedSchema_AgentIDs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -585,7 +442,7 @@ type K8SResource struct { func (m *K8SResource) Reset() { *m = K8SResource{} } func (*K8SResource) ProtoMessage() {} func (*K8SResource) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{6} + return fileDescriptor_27ea71ea705227d1, []int{4} } func (m *K8SResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -739,7 +596,7 @@ type 
K8SResourceUpdate struct { func (m *K8SResourceUpdate) Reset() { *m = K8SResourceUpdate{} } func (*K8SResourceUpdate) ProtoMessage() {} func (*K8SResourceUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{7} + return fileDescriptor_27ea71ea705227d1, []int{5} } func (m *K8SResourceUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -788,7 +645,7 @@ type CronScriptResult struct { func (m *CronScriptResult) Reset() { *m = CronScriptResult{} } func (*CronScriptResult) ProtoMessage() {} func (*CronScriptResult) Descriptor() ([]byte, []int) { - return fileDescriptor_27ea71ea705227d1, []int{8} + return fileDescriptor_27ea71ea705227d1, []int{6} } func (m *CronScriptResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -868,9 +725,7 @@ func (m *CronScriptResult) GetRecordsProcessed() int64 { func init() { proto.RegisterType((*TracepointInfo)(nil), "px.vizier.services.metadata.TracepointInfo") - proto.RegisterType((*FileSourceInfo)(nil), "px.vizier.services.metadata.FileSourceInfo") proto.RegisterType((*AgentTracepointStatus)(nil), "px.vizier.services.metadata.AgentTracepointStatus") - proto.RegisterType((*AgentFileSourceStatus)(nil), "px.vizier.services.metadata.AgentFileSourceStatus") proto.RegisterType((*TableInfo)(nil), "px.vizier.services.metadata.TableInfo") proto.RegisterType((*TableInfo_ColumnInfo)(nil), "px.vizier.services.metadata.TableInfo.ColumnInfo") proto.RegisterType((*ComputedSchema)(nil), "px.vizier.services.metadata.ComputedSchema") @@ -886,92 +741,87 @@ func init() { } var fileDescriptor_27ea71ea705227d1 = []byte{ - // 1352 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x57, 0x4d, 0x6f, 0x1b, 0xb7, - 0x16, 0xd5, 0x58, 0xb2, 0x2d, 0x5f, 0x25, 0xfe, 0xa0, 0x93, 0x17, 0xc1, 0xc1, 0x1b, 0xf9, 0xf9, - 0xe5, 0xbd, 0x38, 0x0d, 0x30, 0xd3, 0xb8, 0x41, 0x6b, 0xa4, 0x48, 0x3f, 0x64, 0x25, 0xb5, 0x9a, - 0xc2, 0x08, 0x46, 0x36, 0x0a, 0x74, 0x33, 
0xa0, 0x66, 0x68, 0x65, 0x10, 0xcd, 0x90, 0x20, 0xa9, - 0xc0, 0x0a, 0xba, 0xe8, 0x4f, 0x28, 0xd0, 0x1f, 0xd1, 0xf6, 0x9f, 0x74, 0x19, 0x74, 0x95, 0x02, - 0x85, 0xd1, 0x28, 0x28, 0x50, 0x74, 0x95, 0x4d, 0xf7, 0x05, 0xc9, 0xf9, 0x52, 0x63, 0x3b, 0xc9, - 0xa2, 0x9b, 0x6e, 0xac, 0x4b, 0xf2, 0x9c, 0x33, 0x3c, 0x97, 0x77, 0x2e, 0xc7, 0xf0, 0xb6, 0xe0, - 0x81, 0xfb, 0x28, 0x7a, 0x1c, 0x11, 0xee, 0x0a, 0xc2, 0x1f, 0x45, 0x01, 0x11, 0x6e, 0x4c, 0x24, - 0x0e, 0xb1, 0xc4, 0xae, 0x90, 0x94, 0x13, 0xd6, 0x37, 0xbf, 0x0e, 0xe3, 0x54, 0x52, 0x74, 0x99, - 0x1d, 0x39, 0x86, 0xe0, 0x64, 0x04, 0x27, 0x23, 0xac, 0x5d, 0x18, 0xd0, 0x01, 0xd5, 0x38, 0x57, - 0x45, 0x86, 0xb2, 0xd6, 0x1a, 0x50, 0x3a, 0x18, 0x12, 0x57, 0x8f, 0xfa, 0xa3, 0x43, 0x57, 0x46, - 0x31, 0x11, 0x12, 0xc7, 0x2c, 0x03, 0xa8, 0x5d, 0x60, 0x16, 0x19, 0x84, 0x3b, 0x1a, 0x45, 0x21, - 0xeb, 0xeb, 0x9f, 0x14, 0x70, 0x5b, 0x01, 0x02, 0xcc, 0x13, 0x2a, 0x5d, 0x36, 0xc4, 0x49, 0x42, - 0xb8, 0x1b, 0x8e, 0x13, 0x1c, 0x47, 0x81, 0x2f, 0x39, 0x0e, 0xa2, 0x64, 0xe0, 0x46, 0xdc, 0x1d, - 0xd2, 0x41, 0x14, 0xe0, 0x21, 0xeb, 0x67, 0x51, 0x4a, 0x77, 0x4f, 0xa0, 0x1f, 0x46, 0x43, 0xe2, - 0x0b, 0x3a, 0xe2, 0x01, 0x29, 0x51, 0x53, 0xc2, 0xff, 0x34, 0x81, 0xc6, 0x31, 0x4d, 0xdc, 0x3e, - 0x16, 0xc4, 0x15, 0x12, 0xcb, 0x91, 0xd0, 0xa9, 0x50, 0x41, 0x0a, 0xdb, 0x54, 0x30, 0xf1, 0x00, - 0x73, 0x12, 0xba, 0x0f, 0xb7, 0x8b, 0xc4, 0xb1, 0x7e, 0x1e, 0xa6, 0xc8, 0x2b, 0x25, 0xa4, 0x1c, - 0x33, 0x22, 0xcc, 0x5f, 0xd6, 0x37, 0xbf, 0x06, 0xb5, 0xf1, 0x87, 0x05, 0x8b, 0xfb, 0x1c, 0x07, - 0x84, 0xd1, 0x28, 0x91, 0xdd, 0xe4, 0x90, 0xa2, 0xab, 0x30, 0x13, 0x85, 0x4d, 0x6b, 0xdd, 0xda, - 0x6c, 0x6c, 0x2d, 0x39, 0xec, 0xc8, 0x31, 0xc9, 0x71, 0x0e, 0x0e, 0xba, 0x9d, 0xf6, 0xdc, 0xe4, - 0xb8, 0x35, 0xd3, 0xed, 0x78, 0x33, 0x51, 0x88, 0xfa, 0x00, 0x32, 0xa7, 0x36, 0x67, 0x34, 0xa1, - 0xad, 0x08, 0xc6, 0xb7, 0x93, 0xfa, 0x76, 0xfe, 0x92, 0x36, 0x27, 0xe2, 0x4e, 0xe6, 0xbd, 0x78, - 0x74, 0x87, 0xb0, 0x21, 0x1d, 0xc7, 0x24, 0x91, 0x5e, 0x49, 0x15, 0x21, 0xa8, 
0x25, 0x38, 0x26, - 0xcd, 0xea, 0xba, 0xb5, 0xb9, 0xe0, 0xe9, 0x18, 0xb5, 0x61, 0x91, 0x1c, 0x31, 0x12, 0x48, 0x12, - 0xfa, 0x2a, 0x39, 0xa4, 0x59, 0x5b, 0xb7, 0x36, 0x17, 0xb7, 0x2e, 0xab, 0x67, 0x67, 0x69, 0x73, - 0x3e, 0x8b, 0x0e, 0xc9, 0xce, 0x38, 0x18, 0x92, 0x9e, 0x82, 0x78, 0xe7, 0x33, 0x8a, 0x1e, 0x6e, - 0xfc, 0x6e, 0xc1, 0xe2, 0xdd, 0x68, 0x48, 0x7a, 0xfa, 0x38, 0xde, 0xcc, 0xf7, 0xe7, 0xd0, 0x28, - 0x1d, 0x65, 0x6a, 0xfc, 0xdd, 0x13, 0x8c, 0x97, 0x50, 0xca, 0x74, 0xf1, 0xbc, 0xb2, 0xd9, 0xc3, - 0x7c, 0xf6, 0x6f, 0x33, 0xfb, 0x93, 0x05, 0x17, 0x3f, 0x1e, 0x90, 0x44, 0x16, 0xe9, 0xee, 0x69, - 0x26, 0xba, 0x01, 0xb3, 0x46, 0xd4, 0x7a, 0xb5, 0xa8, 0x41, 0xa2, 0xeb, 0x30, 0x67, 0x10, 0xa9, - 0xf1, 0xd5, 0x29, 0x8e, 0xd1, 0xf5, 0x52, 0x48, 0x9a, 0xd3, 0xea, 0xab, 0x73, 0xfa, 0x1e, 0xd4, - 0xb1, 0xda, 0xa1, 0x1f, 0x85, 0xda, 0xe0, 0x09, 0xf0, 0xc6, 0xe4, 0xb8, 0x35, 0xaf, 0x6d, 0x74, - 0x3b, 0xde, 0xbc, 0x46, 0x77, 0xc3, 0xc2, 0x5b, 0x91, 0xdd, 0x7f, 0x8c, 0xb7, 0x1f, 0x6b, 0xb0, - 0xb0, 0x8f, 0xfb, 0x43, 0x53, 0x9f, 0x59, 0x75, 0x58, 0xa5, 0xea, 0x40, 0x50, 0x0b, 0x89, 0x08, - 0x9a, 0xf3, 0x66, 0x4e, 0xc5, 0xa8, 0x0d, 0x48, 0x48, 0xcc, 0xa5, 0x9f, 0xf7, 0x3c, 0x3f, 0x31, - 0x86, 0xaa, 0xed, 0x0b, 0x93, 0xe3, 0xd6, 0x72, 0x4f, 0xad, 0xee, 0x67, 0x8b, 0x7b, 0x3d, 0x6f, - 0x59, 0x4c, 0xcf, 0x08, 0xf4, 0x21, 0xac, 0x08, 0x49, 0xd9, 0xb4, 0x44, 0x55, 0x4b, 0xac, 0x4e, - 0x8e, 0x5b, 0x4b, 0x3d, 0x49, 0x59, 0x59, 0x61, 0x49, 0x4c, 0x4d, 0x08, 0x74, 0x0f, 0xe6, 0x03, - 0x3a, 0x1c, 0xc5, 0x89, 0x68, 0xd6, 0xd6, 0xab, 0x9b, 0x8d, 0xad, 0x1b, 0xce, 0x19, 0x5d, 0xdc, - 0xc9, 0x5d, 0x3a, 0x3b, 0x9a, 0xa5, 0x42, 0x2f, 0x53, 0x40, 0x36, 0x80, 0x54, 0x00, 0x19, 0x3d, - 0x26, 0x61, 0x73, 0x76, 0xdd, 0xda, 0xac, 0x7b, 0xa5, 0x19, 0x74, 0x1d, 0x56, 0xb2, 0x11, 0x96, - 0x11, 0x4d, 0xfc, 0x87, 0x64, 0xdc, 0x9c, 0xd3, 0x29, 0x59, 0x9e, 0x5a, 0xb8, 0x47, 0xc6, 0xa8, - 0x05, 0x8d, 0x78, 0x24, 0x0d, 0x2e, 0x0a, 0x9b, 0x75, 0x0d, 0x83, 0x6c, 0xaa, 0x1b, 0xae, 0xfd, - 0x6c, 0x01, 0x14, 
0xbb, 0x38, 0x31, 0xed, 0x2e, 0x2c, 0xa8, 0x6d, 0xfb, 0xaa, 0x93, 0xea, 0xcc, - 0x2e, 0x6e, 0x21, 0xe5, 0xcf, 0x74, 0xd6, 0x0e, 0x96, 0x78, 0x7f, 0xcc, 0x88, 0x57, 0x0f, 0xd3, - 0x08, 0x6d, 0xc3, 0x39, 0x86, 0xa5, 0x24, 0x3c, 0x31, 0x9c, 0xaa, 0xe6, 0x5c, 0x2c, 0x38, 0xf7, - 0xcd, 0xaa, 0xa6, 0x35, 0x58, 0x31, 0xc8, 0x4f, 0xb8, 0x56, 0x3a, 0xe1, 0xf7, 0xe1, 0xbc, 0x20, - 0x31, 0x4e, 0xa4, 0x6a, 0xaa, 0x4a, 0x6e, 0x56, 0xcb, 0xfd, 0xab, 0x90, 0xeb, 0xa5, 0xcb, 0x5a, - 0xef, 0x9c, 0x28, 0x8d, 0x36, 0xbe, 0xaf, 0xc2, 0xe2, 0x0e, 0x8d, 0xd9, 0x48, 0xb5, 0x87, 0xe0, - 0x01, 0x89, 0x31, 0xfa, 0x00, 0xe6, 0x74, 0x9a, 0x44, 0xd3, 0xd2, 0x67, 0xf5, 0xff, 0xd7, 0x3b, - 0x2b, 0x2f, 0x65, 0xa1, 0x6f, 0x2c, 0xb8, 0xa4, 0x43, 0x5f, 0x65, 0xc7, 0x97, 0xd4, 0xcf, 0xea, - 0x5d, 0xd5, 0x9d, 0x52, 0xec, 0x9c, 0xa9, 0x38, 0xbd, 0x1d, 0xf3, 0x80, 0x3d, 0x1c, 0x93, 0x7d, - 0x6a, 0x5e, 0x89, 0x50, 0xdc, 0x49, 0x24, 0x1f, 0xb7, 0x2f, 0x4d, 0x8e, 0x5b, 0xab, 0x2f, 0xad, - 0x76, 0x84, 0xb7, 0x2a, 0x5f, 0xa6, 0xac, 0xed, 0x40, 0x3d, 0x03, 0x4c, 0xbd, 0x82, 0xc6, 0xe3, - 0xeb, 0xbd, 0x82, 0x6b, 0x5f, 0x42, 0xf3, 0xb4, 0xed, 0xa0, 0x65, 0xa8, 0xaa, 0x42, 0x33, 0x85, - 0xa1, 0x42, 0xf4, 0x29, 0xcc, 0x3e, 0xc2, 0xc3, 0x51, 0x76, 0x27, 0xdc, 0x7c, 0x13, 0xd7, 0xb9, - 0x19, 0x23, 0x71, 0x6b, 0x66, 0xdb, 0xda, 0xf8, 0xb6, 0x06, 0x8d, 0x7b, 0xdb, 0xc2, 0x23, 0xe6, - 0x12, 0x41, 0x37, 0xa0, 0xca, 0x68, 0x76, 0x47, 0xfd, 0x5b, 0x37, 0x27, 0x7d, 0xc1, 0x3b, 0x0f, - 0xb7, 0x0b, 0x61, 0xd6, 0x77, 0xee, 0xd3, 0x70, 0xb7, 0xe2, 0x29, 0x2c, 0xea, 0xc2, 0x42, 0x40, - 0x13, 0x89, 0xa3, 0x84, 0xf0, 0x74, 0x5b, 0xd7, 0x4e, 0x27, 0xee, 0x64, 0xd0, 0x03, 0x16, 0x62, - 0x49, 0x76, 0x2b, 0x5e, 0xc1, 0x46, 0xb7, 0x61, 0x3e, 0x75, 0x91, 0x76, 0xbd, 0xff, 0x9c, 0x2e, - 0xd4, 0x33, 0xc0, 0xdd, 0x8a, 0x97, 0x71, 0xd0, 0x0e, 0x2c, 0x90, 0x24, 0xd4, 0xb7, 0x8f, 0x48, - 0xfb, 0xe0, 0x7f, 0x4f, 0x17, 0xb8, 0x93, 0x41, 0xd5, 0x1e, 0x72, 0x9e, 0x12, 0x51, 0x35, 0x26, - 0x18, 0x0e, 0x4c, 0xd9, 0x9f, 0x29, 0xb2, 0x97, 0x41, 
0x95, 0x48, 0xce, 0x43, 0x37, 0xa1, 0x96, - 0xd0, 0x90, 0xe8, 0x16, 0xd1, 0xd8, 0xb2, 0xcf, 0xe0, 0xd3, 0x50, 0x51, 0x35, 0x1a, 0x7d, 0x02, - 0x0d, 0x4e, 0xd8, 0x30, 0x0a, 0xb0, 0x2f, 0x88, 0xd4, 0x2d, 0xb7, 0xb1, 0x75, 0xe5, 0x74, 0xb2, - 0x67, 0xc0, 0x3d, 0x22, 0x77, 0x2b, 0x1e, 0xf0, 0x7c, 0x84, 0xee, 0x02, 0x84, 0xf9, 0x07, 0x80, - 0x6e, 0x40, 0x67, 0xea, 0x14, 0x1f, 0x0b, 0x4a, 0xa7, 0x60, 0xb6, 0x01, 0xea, 0x3c, 0xad, 0x8c, - 0x8d, 0x03, 0x58, 0x29, 0x15, 0x8a, 0x39, 0x3d, 0xf4, 0x11, 0xcc, 0x8d, 0x74, 0x94, 0x56, 0xcc, - 0xe6, 0x59, 0x9b, 0x2d, 0x33, 0xbd, 0x94, 0xb7, 0xf1, 0xeb, 0x0c, 0x2c, 0xef, 0x70, 0x9a, 0xf4, - 0x02, 0x1e, 0x31, 0xe9, 0x11, 0x31, 0x1a, 0x4a, 0x74, 0x0b, 0x16, 0x84, 0x1e, 0xfb, 0xa7, 0x7f, - 0x2f, 0x9d, 0x9b, 0x1c, 0xb7, 0xea, 0x86, 0xd5, 0xed, 0x78, 0x75, 0x83, 0xef, 0x86, 0x68, 0x1b, - 0x16, 0xf2, 0x3b, 0x25, 0x2d, 0xc7, 0x35, 0xc7, 0x7c, 0xac, 0x3b, 0xd9, 0xc7, 0xba, 0x93, 0x5f, - 0x24, 0x5e, 0x01, 0x46, 0xd7, 0x60, 0x96, 0x70, 0x4e, 0x79, 0x5a, 0x7b, 0x27, 0x5e, 0xcd, 0x06, - 0x81, 0xde, 0x82, 0x15, 0x72, 0x44, 0x82, 0x91, 0xee, 0xf1, 0x4a, 0xc1, 0x4f, 0x4c, 0xc5, 0x55, - 0xbd, 0xa5, 0x7c, 0x41, 0x3d, 0x64, 0x4f, 0x20, 0x07, 0x56, 0x03, 0x1a, 0xb3, 0x68, 0x88, 0xa7, - 0xd0, 0xb3, 0x1a, 0xbd, 0x52, 0x5a, 0x4a, 0xf1, 0x57, 0x61, 0xa9, 0x3f, 0x96, 0x44, 0xf8, 0x8c, - 0xd3, 0x80, 0x08, 0x41, 0x42, 0x5d, 0x46, 0x55, 0x6f, 0x51, 0x4f, 0xdf, 0xcf, 0x66, 0xd5, 0xa5, - 0xc4, 0x49, 0x40, 0x79, 0x58, 0x86, 0xce, 0x6b, 0xe8, 0x72, 0xba, 0x90, 0x83, 0xdb, 0xb7, 0x9f, - 0x3c, 0xb3, 0x2b, 0x4f, 0x9f, 0xd9, 0x95, 0x17, 0xcf, 0x6c, 0xeb, 0xab, 0x89, 0x6d, 0x7d, 0x37, - 0xb1, 0xad, 0x1f, 0x26, 0xb6, 0xf5, 0x64, 0x62, 0x5b, 0xbf, 0x4c, 0x6c, 0xeb, 0xb7, 0x89, 0x5d, - 0x79, 0x31, 0xb1, 0xad, 0xaf, 0x9f, 0xdb, 0x95, 0x27, 0xcf, 0xed, 0xca, 0xd3, 0xe7, 0x76, 0xe5, - 0x8b, 0xf9, 0xf4, 0xbf, 0xa5, 0xfe, 0x9c, 0x4e, 0xdd, 0x3b, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, - 0xa4, 0x06, 0x48, 0xe3, 0x5c, 0x0d, 0x00, 0x00, + // 1273 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xc6, 0x4e, 0x62, 0x3f, 0xb7, 0xf9, 0x33, 0x69, 0xa9, 0x95, 0x8a, 0x75, 0x30, 0x85, + 0xba, 0x54, 0xda, 0xa5, 0xa1, 0x12, 0x51, 0x51, 0xf9, 0x63, 0xbb, 0x10, 0x53, 0x14, 0x55, 0xeb, + 0xe4, 0xc2, 0x65, 0x35, 0xde, 0x9d, 0xba, 0xab, 0x7a, 0x77, 0x46, 0x33, 0xe3, 0x2a, 0xae, 0x38, + 0xf0, 0x11, 0x90, 0xf8, 0x10, 0xc0, 0x91, 0x6f, 0xc1, 0xb1, 0xc7, 0x22, 0xa1, 0x88, 0x6e, 0x85, + 0xc4, 0xb1, 0x17, 0xee, 0x68, 0x66, 0x76, 0xd7, 0x36, 0x6d, 0xd2, 0x72, 0xb1, 0xdf, 0xcc, 0xfc, + 0x7e, 0xbf, 0x79, 0xef, 0xcd, 0x9b, 0x37, 0x0b, 0x1f, 0x0a, 0x1e, 0xb8, 0x8f, 0xa2, 0xc7, 0x11, + 0xe1, 0xae, 0x20, 0xfc, 0x51, 0x14, 0x10, 0xe1, 0xc6, 0x44, 0xe2, 0x10, 0x4b, 0xec, 0x0a, 0x49, + 0x39, 0x61, 0x43, 0xf3, 0xef, 0x30, 0x4e, 0x25, 0x45, 0x97, 0xd9, 0xb1, 0x63, 0x08, 0x4e, 0x4e, + 0x70, 0x72, 0xc2, 0xf6, 0x85, 0x11, 0x1d, 0x51, 0x8d, 0x73, 0x95, 0x65, 0x28, 0xdb, 0xcd, 0x11, + 0xa5, 0xa3, 0x31, 0x71, 0xf5, 0x68, 0x38, 0xb9, 0xef, 0xca, 0x28, 0x26, 0x42, 0xe2, 0x98, 0xe5, + 0x00, 0xe5, 0x05, 0x66, 0x91, 0x41, 0xb8, 0x93, 0x49, 0x14, 0xb2, 0xa1, 0xfe, 0xcb, 0x00, 0xb7, + 0x15, 0x20, 0xc0, 0x3c, 0xa1, 0xd2, 0x65, 0x63, 0x9c, 0x24, 0x84, 0xbb, 0xe1, 0x34, 0xc1, 0x71, + 0x14, 0xf8, 0x92, 0xe3, 0x20, 0x4a, 0x46, 0x6e, 0xc4, 0xdd, 0x31, 0x1d, 0x45, 0x01, 0x1e, 0xb3, + 0x61, 0x6e, 0x65, 0xf4, 0xf7, 0x34, 0x9d, 0xc6, 0x31, 0x4d, 0xdc, 0x21, 0x16, 0xc4, 0x15, 0x12, + 0xcb, 0x89, 0xd0, 0x91, 0x29, 0x23, 0x83, 0xb5, 0x15, 0x4c, 0x3c, 0xc0, 0x9c, 0x84, 0xee, 0xc3, + 0xbd, 0x59, 0x1e, 0xd8, 0xb0, 0x30, 0x33, 0xe4, 0x95, 0x39, 0xa4, 0x9c, 0x32, 0x22, 0xcc, 0x2f, + 0x1b, 0x9a, 0x7f, 0x83, 0x6a, 0xfd, 0x63, 0xc1, 0xda, 0x21, 0xc7, 0x01, 0x61, 0x34, 0x4a, 0x64, + 0x3f, 0xb9, 0x4f, 0xd1, 0x55, 0x58, 0x8a, 0xc2, 0x86, 0xb5, 0x63, 0xb5, 0xeb, 0xbb, 0xeb, 0x0e, + 0x3b, 0x76, 0x4c, 0xac, 0xce, 0xd1, 0x51, 0xbf, 0xd7, 0x59, 0x49, 0x4f, 0x9a, 0x4b, 0xfd, 0x9e, + 0xb7, 0x14, 0x85, 
0x68, 0x08, 0x20, 0x0b, 0x6a, 0x63, 0x49, 0x13, 0x3a, 0x8a, 0x60, 0xb2, 0xe0, + 0x64, 0x59, 0x70, 0xfe, 0x93, 0x05, 0x27, 0xe2, 0x4e, 0x1e, 0xfb, 0x6c, 0xeb, 0x1e, 0x61, 0x63, + 0x3a, 0x8d, 0x49, 0x22, 0xbd, 0x39, 0x55, 0x84, 0xa0, 0x92, 0xe0, 0x98, 0x34, 0xca, 0x3b, 0x56, + 0xbb, 0xe6, 0x69, 0x1b, 0x75, 0x60, 0x8d, 0x1c, 0x33, 0x12, 0x48, 0x12, 0xfa, 0x2a, 0x39, 0xa4, + 0x51, 0xd9, 0xb1, 0xda, 0x6b, 0xbb, 0x97, 0xd5, 0xde, 0x79, 0xda, 0x9c, 0x6f, 0xa2, 0xfb, 0xa4, + 0x3b, 0x0d, 0xc6, 0x64, 0xa0, 0x20, 0xde, 0xf9, 0x9c, 0xa2, 0x87, 0xad, 0xdf, 0x2d, 0xb8, 0xf8, + 0xc5, 0x88, 0x24, 0x72, 0xe6, 0xc1, 0x40, 0x33, 0xd1, 0x0d, 0x58, 0x36, 0xa2, 0xd6, 0xeb, 0x45, + 0x0d, 0x12, 0x5d, 0x87, 0x15, 0x83, 0xc8, 0x92, 0xb0, 0xb5, 0xc0, 0x31, 0xba, 0x5e, 0x06, 0xc9, + 0xd2, 0x5b, 0x7e, 0x7d, 0x7a, 0x3f, 0x86, 0x2a, 0x56, 0x1e, 0xfa, 0x51, 0xa8, 0x03, 0x7c, 0x05, + 0xbc, 0x9e, 0x9e, 0x34, 0x57, 0x75, 0x18, 0xfd, 0x9e, 0xb7, 0xaa, 0xd1, 0xfd, 0xb0, 0xf5, 0x6b, + 0x05, 0x6a, 0x87, 0x78, 0x38, 0x26, 0xfa, 0x38, 0xf3, 0x0c, 0x5a, 0x73, 0x19, 0x44, 0x50, 0x09, + 0x89, 0x08, 0x1a, 0xab, 0x66, 0x4e, 0xd9, 0xa8, 0x03, 0x48, 0x48, 0xcc, 0xa5, 0x5f, 0x54, 0xbe, + 0x9f, 0x98, 0x80, 0xca, 0x9d, 0x0b, 0xe9, 0x49, 0x73, 0x63, 0xa0, 0x56, 0x0f, 0xf3, 0xc5, 0x83, + 0x81, 0xb7, 0x21, 0x16, 0x67, 0x04, 0xfa, 0x0c, 0x36, 0x85, 0xa4, 0x6c, 0x51, 0xa2, 0xac, 0x25, + 0xb6, 0xd2, 0x93, 0xe6, 0xfa, 0x40, 0x52, 0x36, 0xaf, 0xb0, 0x2e, 0x16, 0x26, 0x04, 0xba, 0x0b, + 0xab, 0x01, 0x1d, 0x4f, 0xe2, 0x44, 0x34, 0x2a, 0x3b, 0xe5, 0x76, 0x7d, 0xf7, 0x86, 0x73, 0xc6, + 0x5d, 0x76, 0x8a, 0x28, 0x9d, 0xae, 0x66, 0x29, 0xd3, 0xcb, 0x15, 0x90, 0x0d, 0x20, 0x15, 0x40, + 0x46, 0x8f, 0x49, 0xd8, 0x58, 0xde, 0xb1, 0xda, 0x55, 0x6f, 0x6e, 0x06, 0x5d, 0x87, 0xcd, 0x7c, + 0x84, 0x65, 0x44, 0x13, 0xff, 0x21, 0x99, 0x36, 0x56, 0x74, 0x4a, 0x36, 0x16, 0x16, 0xee, 0x92, + 0xe9, 0xf6, 0x1f, 0x16, 0xc0, 0x6c, 0x93, 0x57, 0x66, 0xd5, 0x85, 0x9a, 0xf2, 0xca, 0x57, 0xf7, + 0x4b, 0x27, 0x6e, 0x6d, 0x17, 0x29, 0xf7, 0xcd, 0x7d, 
0xeb, 0x61, 0x89, 0x0f, 0xa7, 0x8c, 0x78, + 0xd5, 0x30, 0xb3, 0xd0, 0x1e, 0x9c, 0x63, 0x58, 0x4a, 0xc2, 0x13, 0xc3, 0x29, 0x6b, 0xce, 0xc5, + 0x19, 0xe7, 0x9e, 0x59, 0xd5, 0xb4, 0x3a, 0x9b, 0x0d, 0x8a, 0x03, 0xac, 0xcc, 0x1d, 0xe0, 0x27, + 0x70, 0x5e, 0x90, 0x18, 0x27, 0x52, 0x5d, 0x35, 0x25, 0xb7, 0xac, 0xe5, 0xde, 0x9a, 0xc9, 0x0d, + 0xb2, 0x65, 0xad, 0x77, 0x4e, 0xcc, 0x8d, 0x5a, 0xbf, 0x94, 0x61, 0xad, 0x4b, 0x63, 0x36, 0x51, + 0x37, 0x24, 0x78, 0x40, 0x62, 0x8c, 0x3e, 0x85, 0x15, 0x9d, 0x05, 0xd1, 0xb0, 0xf4, 0x51, 0xbc, + 0xff, 0x66, 0x47, 0xe1, 0x65, 0x2c, 0xf4, 0xa3, 0x05, 0x97, 0xb4, 0xe9, 0xab, 0xec, 0xf8, 0x92, + 0xfa, 0x79, 0x39, 0xab, 0xb2, 0x52, 0x8a, 0xbd, 0x33, 0x15, 0x17, 0xdd, 0x31, 0x1b, 0x1c, 0xe0, + 0x98, 0x1c, 0x52, 0x53, 0xf1, 0xa1, 0xb8, 0x93, 0x48, 0x3e, 0xed, 0x5c, 0x4a, 0x4f, 0x9a, 0x5b, + 0x2f, 0xad, 0xf6, 0x84, 0xb7, 0x25, 0x5f, 0xa6, 0x6c, 0x77, 0xa1, 0x9a, 0x03, 0x16, 0x6e, 0x98, + 0x89, 0xf1, 0xcd, 0x6e, 0xd8, 0xf6, 0x77, 0xd0, 0x38, 0xcd, 0x1d, 0xb4, 0x01, 0x65, 0x55, 0x47, + 0xa6, 0x30, 0x94, 0x89, 0xbe, 0x86, 0xe5, 0x47, 0x78, 0x3c, 0x21, 0x59, 0x77, 0xb8, 0xf9, 0x7f, + 0xa2, 0x2e, 0x82, 0x31, 0x12, 0xb7, 0x96, 0xf6, 0xac, 0xd6, 0x4f, 0x15, 0xa8, 0xdf, 0xdd, 0x13, + 0x1e, 0x11, 0x74, 0xc2, 0x03, 0x82, 0x6e, 0x40, 0x99, 0xd1, 0xbc, 0x63, 0xbf, 0xad, 0x7b, 0x8f, + 0x6e, 0xfb, 0xce, 0xc3, 0xbd, 0x99, 0x30, 0x1b, 0x3a, 0xf7, 0x68, 0xb8, 0x5f, 0xf2, 0x14, 0x16, + 0xf5, 0xa1, 0x16, 0xd0, 0x44, 0xe2, 0x28, 0x21, 0x3c, 0x73, 0xeb, 0xda, 0xe9, 0xc4, 0x6e, 0x0e, + 0x3d, 0x62, 0x21, 0x96, 0x64, 0xbf, 0xe4, 0xcd, 0xd8, 0xe8, 0x36, 0xac, 0x66, 0x51, 0x64, 0x4d, + 0xed, 0x9d, 0xd3, 0x85, 0x06, 0x06, 0xb8, 0x5f, 0xf2, 0x72, 0x0e, 0xea, 0x42, 0x8d, 0x24, 0xa1, + 0x6e, 0xc0, 0x22, 0x6b, 0x73, 0xef, 0x9e, 0x2e, 0x70, 0x27, 0x87, 0x2a, 0x1f, 0x0a, 0x9e, 0x12, + 0x51, 0x35, 0x26, 0x18, 0x0e, 0x4c, 0xd9, 0x9f, 0x29, 0x72, 0x90, 0x43, 0x95, 0x48, 0xc1, 0x43, + 0x37, 0xa1, 0x92, 0xd0, 0x90, 0xe8, 0x0e, 0x50, 0xdf, 0xb5, 0xcf, 0xe0, 0xd3, 0x50, 0x51, 
0x35, + 0x1a, 0x7d, 0x05, 0x75, 0x4e, 0xd8, 0x38, 0x0a, 0xb0, 0x2f, 0x88, 0xd4, 0x1d, 0xb5, 0xbe, 0x7b, + 0xe5, 0x74, 0xb2, 0x67, 0xc0, 0x03, 0x22, 0xf7, 0x4b, 0x1e, 0xf0, 0x62, 0x84, 0xbe, 0x04, 0x08, + 0x8b, 0x37, 0xb0, 0x51, 0x7d, 0x9d, 0xce, 0xec, 0xbd, 0x54, 0x3a, 0x33, 0x66, 0x07, 0xa0, 0xca, + 0xb3, 0xca, 0x68, 0x1d, 0xc1, 0xe6, 0x5c, 0xa1, 0x98, 0xd3, 0x43, 0x9f, 0xc3, 0xca, 0x44, 0x5b, + 0x59, 0xc5, 0xb4, 0xcf, 0x72, 0x76, 0x9e, 0xe9, 0x65, 0xbc, 0xd6, 0x5f, 0x4b, 0xb0, 0xd1, 0xe5, + 0x34, 0x19, 0x04, 0x3c, 0x62, 0xd2, 0x23, 0x62, 0x32, 0x96, 0xe8, 0x16, 0xd4, 0x84, 0x1e, 0xfb, + 0xa7, 0x7f, 0x3d, 0x9c, 0x4b, 0x4f, 0x9a, 0x55, 0xc3, 0xea, 0xf7, 0xbc, 0xaa, 0xc1, 0xf7, 0x43, + 0xb4, 0x07, 0xb5, 0xe2, 0xc9, 0xc8, 0xca, 0x71, 0xdb, 0x31, 0x5f, 0x64, 0x4e, 0xfe, 0x45, 0xe6, + 0x14, 0xef, 0x84, 0x37, 0x03, 0xa3, 0x6b, 0xb0, 0x4c, 0x38, 0xa7, 0x3c, 0xab, 0xbd, 0x57, 0xbe, + 0xbc, 0x06, 0x81, 0x3e, 0x80, 0x4d, 0x72, 0x4c, 0x82, 0x89, 0x6e, 0xf5, 0x4a, 0xc1, 0x4f, 0x4c, + 0xc5, 0x95, 0xbd, 0xf5, 0x62, 0x41, 0x6d, 0x72, 0x20, 0x90, 0x03, 0x5b, 0x01, 0x8d, 0x59, 0x34, + 0xc6, 0x0b, 0xe8, 0x65, 0x8d, 0xde, 0x9c, 0x5b, 0xca, 0xf0, 0x57, 0x61, 0x7d, 0x38, 0x95, 0x44, + 0xf8, 0x8c, 0xd3, 0x80, 0x08, 0x41, 0x42, 0x5d, 0x46, 0x65, 0x6f, 0x4d, 0x4f, 0xdf, 0xcb, 0x67, + 0xd5, 0x9b, 0xc3, 0x49, 0x40, 0x79, 0x38, 0x0f, 0x5d, 0xd5, 0xd0, 0x8d, 0x6c, 0xa1, 0x00, 0x77, + 0x6e, 0x3f, 0x79, 0x66, 0x97, 0x9e, 0x3e, 0xb3, 0x4b, 0x2f, 0x9e, 0xd9, 0xd6, 0xf7, 0xa9, 0x6d, + 0xfd, 0x9c, 0xda, 0xd6, 0x6f, 0xa9, 0x6d, 0x3d, 0x49, 0x6d, 0xeb, 0xcf, 0xd4, 0xb6, 0xfe, 0x4e, + 0xed, 0xd2, 0x8b, 0xd4, 0xb6, 0x7e, 0x78, 0x6e, 0x97, 0x9e, 0x3c, 0xb7, 0x4b, 0x4f, 0x9f, 0xdb, + 0xa5, 0x6f, 0x57, 0xb3, 0x4f, 0xe2, 0xe1, 0x8a, 0x4e, 0xdd, 0x47, 0xff, 0x06, 0x00, 0x00, 0xff, + 0xff, 0xbb, 0xe3, 0x37, 0x9a, 0x41, 0x0b, 0x00, 0x00, } func (this *TracepointInfo) Equal(that interface{}) bool { @@ -1007,39 +857,6 @@ func (this *TracepointInfo) Equal(that interface{}) bool { } return true } -func (this 
*FileSourceInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FileSourceInfo) - if !ok { - that2, ok := that.(FileSourceInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ID.Equal(that1.ID) { - return false - } - if !this.FileSource.Equal(that1.FileSource) { - return false - } - if this.Name != that1.Name { - return false - } - if this.ExpectedState != that1.ExpectedState { - return false - } - return true -} func (this *AgentTracepointStatus) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1073,39 +890,6 @@ func (this *AgentTracepointStatus) Equal(that interface{}) bool { } return true } -func (this *AgentFileSourceStatus) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*AgentFileSourceStatus) - if !ok { - that2, ok := that.(AgentFileSourceStatus) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.State != that1.State { - return false - } - if !this.Status.Equal(that1.Status) { - return false - } - if !this.ID.Equal(that1.ID) { - return false - } - if !this.AgentID.Equal(that1.AgentID) { - return false - } - return true -} func (this *TableInfo) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1151,9 +935,6 @@ func (this *TableInfo) Equal(that interface{}) bool { if this.TabletizationKey != that1.TabletizationKey { return false } - if this.MutationId != that1.MutationId { - return false - } return true } func (this *TableInfo_ColumnInfo) Equal(that interface{}) bool { @@ -1563,23 +1344,6 @@ func (this *TracepointInfo) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *FileSourceInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = 
append(s, "&storepb.FileSourceInfo{") - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - if this.FileSource != nil { - s = append(s, "FileSource: "+fmt.Sprintf("%#v", this.FileSource)+",\n") - } - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "ExpectedState: "+fmt.Sprintf("%#v", this.ExpectedState)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} func (this *AgentTracepointStatus) GoString() string { if this == nil { return "nil" @@ -1599,30 +1363,11 @@ func (this *AgentTracepointStatus) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *AgentFileSourceStatus) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&storepb.AgentFileSourceStatus{") - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - if this.Status != nil { - s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n") - } - if this.ID != nil { - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - } - if this.AgentID != nil { - s = append(s, "AgentID: "+fmt.Sprintf("%#v", this.AgentID)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} func (this *TableInfo) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 11) s = append(s, "&storepb.TableInfo{") s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") s = append(s, "Desc: "+fmt.Sprintf("%#v", this.Desc)+",\n") @@ -1633,7 +1378,6 @@ func (this *TableInfo) GoString() string { } s = append(s, "Tabletized: "+fmt.Sprintf("%#v", this.Tabletized)+",\n") s = append(s, "TabletizationKey: "+fmt.Sprintf("%#v", this.TabletizationKey)+",\n") - s = append(s, "MutationId: "+fmt.Sprintf("%#v", this.MutationId)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1865,7 +1609,7 @@ func (m *TracepointInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FileSourceInfo) 
Marshal() (dAtA []byte, err error) { +func (m *AgentTracepointStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1875,31 +1619,19 @@ func (m *FileSourceInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FileSourceInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *AgentTracepointStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *FileSourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AgentTracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ExpectedState != 0 { - i = encodeVarintStore(dAtA, i, uint64(m.ExpectedState)) - i-- - dAtA[i] = 0x20 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintStore(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if m.FileSource != nil { + if m.AgentID != nil { { - size, err := m.FileSource.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1907,7 +1639,7 @@ func (m *FileSourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintStore(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } if m.ID != nil { { @@ -1919,12 +1651,29 @@ func (m *FileSourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintStore(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStore(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.State != 0 { + i = encodeVarintStore(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *AgentTracepointStatus) Marshal() (dAtA []byte, err error) { +func (m *TableInfo) Marshal() (dAtA []byte, err 
error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1934,151 +1683,16 @@ func (m *AgentTracepointStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AgentTracepointStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *TableInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AgentTracepointStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TableInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.AgentID != nil { - { - size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStore(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStore(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStore(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.State != 0 { - i = encodeVarintStore(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *AgentFileSourceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AgentFileSourceStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AgentFileSourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AgentID != nil { - { - size, err := m.AgentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintStore(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ID != nil { - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStore(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStore(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.State != 0 { - i = encodeVarintStore(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TableInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TableInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MutationId) > 0 { - i -= len(m.MutationId) - copy(dAtA[i:], m.MutationId) - i = encodeVarintStore(dAtA, i, uint64(len(m.MutationId))) - i-- - dAtA[i] = 0x42 - } if len(m.Desc) > 0 { i -= len(m.Desc) copy(dAtA[i:], m.Desc) @@ -2638,30 +2252,6 @@ func (m *TracepointInfo) Size() (n int) { return n } -func (m *FileSourceInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovStore(uint64(l)) - } - if m.FileSource != nil { - l = m.FileSource.Size() - n += 1 + l + sovStore(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovStore(uint64(l)) - } - if m.ExpectedState != 0 { - n += 1 + sovStore(uint64(m.ExpectedState)) - } - return n -} - func (m *AgentTracepointStatus) Size() (n int) { if m == nil { return 0 @@ -2686,30 +2276,6 @@ func (m *AgentTracepointStatus) Size() (n int) { return n } -func (m *AgentFileSourceStatus) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.State != 0 { - n += 1 + sovStore(uint64(m.State)) - } - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovStore(uint64(l)) - } - if m.ID != nil { - l = m.ID.Size() - n += 1 + l + sovStore(uint64(l)) - } - if m.AgentID != nil { - l = m.AgentID.Size() - n += 1 + l + sovStore(uint64(l)) - } - return n -} - func (m *TableInfo) Size() (n int) { if m == nil { return 0 @@ -2743,10 +2309,6 @@ func (m *TableInfo) Size() (n int) { if l > 0 { n += 1 + l + sovStore(uint64(l)) } - l = len(m.MutationId) - if l > 0 { - n += 1 + l + sovStore(uint64(l)) - } return n } @@ -2992,19 +2554,6 @@ func (this *TracepointInfo) String() string { }, "") return s } -func (this *FileSourceInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FileSourceInfo{`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `FileSource:` + strings.Replace(fmt.Sprintf("%v", this.FileSource), "FileSourceDeployment", "ir.FileSourceDeployment", 1) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ExpectedState:` + fmt.Sprintf("%v", this.ExpectedState) + `,`, - `}`, - }, "") - return s -} func (this *AgentTracepointStatus) String() string { if this == nil { return "nil" @@ -3018,19 +2567,6 @@ func (this *AgentTracepointStatus) String() string { }, "") return s } -func (this *AgentFileSourceStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AgentFileSourceStatus{`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "statuspb.Status", 1) + `,`, - `ID:` + strings.Replace(fmt.Sprintf("%v", this.ID), "UUID", "uuidpb.UUID", 1) + `,`, - `AgentID:` + strings.Replace(fmt.Sprintf("%v", this.AgentID), "UUID", "uuidpb.UUID", 1) + `,`, - `}`, - }, "") - return s -} func (this *TableInfo) String() string { if this == nil { return "nil" @@ -3048,7 
+2584,6 @@ func (this *TableInfo) String() string { `Tabletized:` + fmt.Sprintf("%v", this.Tabletized) + `,`, `TabletizationKey:` + fmt.Sprintf("%v", this.TabletizationKey) + `,`, `Desc:` + fmt.Sprintf("%v", this.Desc) + `,`, - `MutationId:` + fmt.Sprintf("%v", this.MutationId) + `,`, `}`, }, "") return s @@ -3405,7 +2940,7 @@ func (m *TracepointInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *FileSourceInfo) Unmarshal(dAtA []byte) error { +func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3428,17 +2963,17 @@ func (m *FileSourceInfo) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FileSourceInfo: wiretype end group for non-group") + return fmt.Errorf("proto: AgentTracepointStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FileSourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AgentTracepointStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var msglen int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStore @@ -3448,31 +2983,14 @@ func (m *FileSourceInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.State |= statuspb.LifeCycleState(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FileSource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3499,18 +3017,18 @@ func (m *FileSourceInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FileSource == nil { - m.FileSource = &ir.FileSourceDeployment{} + if m.Status == nil { + m.Status = &statuspb.Status{} } - if err := m.FileSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStore @@ -3520,29 +3038,33 @@ func (m *FileSourceInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthStore } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthStore } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.ID == nil { + m.ID = &uuidpb.UUID{} + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedState", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) } - m.ExpectedState = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowStore @@ -3552,353 +3074,16 @@ func (m *FileSourceInfo) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedState |= statuspb.LifeCycleState(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipStore(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthStore } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStore - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentTracepointStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentTracepointStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentTracepointStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= statuspb.LifeCycleState(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &statuspb.Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AgentID == nil { - m.AgentID = &uuidpb.UUID{} - } - if err := m.AgentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStore(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStore - } - if (iNdEx + skippy) 
> l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AgentFileSourceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AgentFileSourceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AgentFileSourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= statuspb.LifeCycleState(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &statuspb.Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ID == nil { - m.ID = &uuidpb.UUID{} - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + msglen - if postIndex < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthStore } if postIndex > l { @@ -4149,38 +3334,6 @@ func (m *TableInfo) Unmarshal(dAtA []byte) error { } m.Desc = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MutationId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStore - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStore - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStore - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MutationId = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStore(dAtA[iNdEx:]) diff --git a/src/vizier/services/metadata/storepb/store.proto b/src/vizier/services/metadata/storepb/store.proto index 4e1144a497f..975b04d3685 100644 --- a/src/vizier/services/metadata/storepb/store.proto +++ b/src/vizier/services/metadata/storepb/store.proto @@ -26,7 +26,6 @@ import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "src/api/proto/uuidpb/uuid.proto"; import "src/carnot/planner/dynamic_tracing/ir/logicalpb/logical.proto"; -import "src/carnot/planner/file_source/ir/logical.proto"; import "src/common/base/statuspb/status.proto"; import "src/shared/k8s/metadatapb/metadata.proto"; import "src/shared/types/typespb/types.proto"; @@ -47,18 +46,6 @@ message TracepointInfo { px.statuspb.LifeCycleState expected_state = 4; } -// Information about the status of a specific file source -message FileSourceInfo { - uuidpb.UUID id = 1 [ (gogoproto.customname) = "ID" ]; - // The file source deployment. - px.carnot.planner.file_source.ir.FileSourceDeployment file_source = 2; - // The name of the file source, not unique. - string name = 3; - // The desired state of the file source, either running or terminated. The actual - // state of the file source is derived by the states of the individual agent file sources. - px.statuspb.LifeCycleState expected_state = 4; -} - // The agent's registration status for a particular tracepoint. message AgentTracepointStatus { // The state of the tracepoint. @@ -69,16 +56,6 @@ message AgentTracepointStatus { uuidpb.UUID agent_id = 4 [ (gogoproto.customname) = "AgentID" ]; } -// The agent's registration status for a particular file source. -message AgentFileSourceStatus { - // The state of the file source. - px.statuspb.LifeCycleState state = 1; - // The status of the file source, specified if the state of the file source is not healthy. 
- px.statuspb.Status status = 2; - uuidpb.UUID id = 3 [ (gogoproto.customname) = "ID" ]; - uuidpb.UUID agent_id = 4 [ (gogoproto.customname) = "AgentID" ]; -} - // TableInfo contains info about the table in Vizier. message TableInfo { // Name of the table. @@ -106,8 +83,6 @@ message TableInfo { bool tabletized = 5; // The tabletization key of this schema. string tabletization_key = 6; - // ID of the mutation that created this schema, empty if unrelated to a mutation. - string mutation_id = 8; } // ComputedSchema describes the schema available on Vizier. diff --git a/src/vizier/services/query_broker/controllers/BUILD.bazel b/src/vizier/services/query_broker/controllers/BUILD.bazel index 662397ac614..2ccb9f3a1e9 100644 --- a/src/vizier/services/query_broker/controllers/BUILD.bazel +++ b/src/vizier/services/query_broker/controllers/BUILD.bazel @@ -46,7 +46,6 @@ go_library( "//src/carnot/goplanner:go_default_library", "//src/carnot/planner/compilerpb:compiler_status_pl_go_proto", "//src/carnot/planner/distributedpb:distributed_plan_pl_go_proto", - "//src/carnot/planner/file_source/ir:logical_pl_go_proto", "//src/carnot/planner/plannerpb:service_pl_go_proto", "//src/carnot/planpb:plan_pl_go_proto", "//src/carnot/queryresultspb:query_results_pl_go_proto", diff --git a/src/vizier/services/query_broker/controllers/errors.go b/src/vizier/services/query_broker/controllers/errors.go index 3ce6c74bd1b..c07c4eb4f1c 100644 --- a/src/vizier/services/query_broker/controllers/errors.go +++ b/src/vizier/services/query_broker/controllers/errors.go @@ -29,8 +29,4 @@ var ( ErrTracepointPending = errors.New("tracepoints are still pending") // ErrConfigUpdateFailed failed to send the config update request to an agent. ErrConfigUpdateFailed = errors.New("failed to update config") - // ErrFileSourceRegistrationFailed failed to register file source. to an agent. 
- ErrFileSourceRegistrationFailed = errors.New("failed to register file sources") - // ErrFileSourceDeletionFailed failed to delete file source. - ErrFileSourceDeletionFailed = errors.New("failed to delete file sources") ) diff --git a/src/vizier/services/query_broker/controllers/mutation_executor.go b/src/vizier/services/query_broker/controllers/mutation_executor.go index abd0e45a267..813769362da 100644 --- a/src/vizier/services/query_broker/controllers/mutation_executor.go +++ b/src/vizier/services/query_broker/controllers/mutation_executor.go @@ -30,7 +30,6 @@ import ( "px.dev/pixie/src/api/proto/uuidpb" "px.dev/pixie/src/api/proto/vizierpb" "px.dev/pixie/src/carnot/planner/distributedpb" - "px.dev/pixie/src/carnot/planner/file_source/ir" "px.dev/pixie/src/carnot/planner/plannerpb" "px.dev/pixie/src/carnot/planpb" "px.dev/pixie/src/common/base/statuspb" @@ -41,7 +40,6 @@ import ( // TracepointMap stores a map from the name to tracepoint info. type TracepointMap map[string]*TracepointInfo -type FileSourceMap map[string]*FileSourceInfo // MutationExecutor is the interface for running script mutations. type MutationExecutor interface { @@ -53,10 +51,8 @@ type MutationExecutor interface { type MutationExecutorImpl struct { planner Planner mdtp metadatapb.MetadataTracepointServiceClient - mdfs metadatapb.MetadataFileSourceServiceClient mdconf metadatapb.MetadataConfigServiceClient activeTracepoints TracepointMap - activeFileSources FileSourceMap outputTables []string distributedState *distributedpb.DistributedState } @@ -68,29 +64,19 @@ type TracepointInfo struct { Status *statuspb.Status } -type FileSourceInfo struct { - GlobPattern string - TableName string - ID uuid.UUID - Status *statuspb.Status -} - // NewMutationExecutor creates a new mutation executor. 
func NewMutationExecutor( planner Planner, mdtp metadatapb.MetadataTracepointServiceClient, - mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, distributedState *distributedpb.DistributedState, ) MutationExecutor { return &MutationExecutorImpl{ planner: planner, mdtp: mdtp, - mdfs: mdfs, mdconf: mdconf, distributedState: distributedState, activeTracepoints: make(TracepointMap), - activeFileSources: make(FileSourceMap), } } @@ -150,12 +136,6 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut Names: make([]string, 0), } configmapReqs := make([]*metadatapb.UpdateConfigRequest, 0) - fileSourceReqs := &metadatapb.RegisterFileSourceRequest{ - Requests: make([]*ir.FileSourceDeployment, 0), - } - deleteFileSourcesReq := &metadatapb.RemoveFileSourceRequest{ - Names: make([]string, 0), - } outputTablesMap := make(map[string]bool) // TODO(zasgar): We should make sure that we don't simultaneously add and delete the tracepoint. @@ -197,34 +177,6 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut AgentPodName: mut.ConfigUpdate.AgentPodName, }) } - case *plannerpb.CompileMutation_FileSource: - { - name := mut.FileSource.GlobPattern - tableName := mut.FileSource.TableName - fileSourceReqs.Requests = append(fileSourceReqs.Requests, &ir.FileSourceDeployment{ - Name: name, - GlobPattern: name, - TableName: tableName, - TTL: mut.FileSource.TTL, - }) - if _, ok := m.activeFileSources[name]; ok { - return nil, fmt.Errorf("file source with name '%s', already used", name) - } - // TODO(ddelnano): Add unit tests that would have caught the bug with the - // file source output table issue. 
The line that caused the bug is left commented below: - // outputTablesMap[name] = true - outputTablesMap[tableName] = true - - m.activeFileSources[name] = &FileSourceInfo{ - GlobPattern: mut.FileSource.GlobPattern, - ID: uuid.Nil, - Status: nil, - } - } - case *plannerpb.CompileMutation_DeleteFileSource: - { - deleteFileSourcesReq.Names = append(deleteFileSourcesReq.Names, mut.DeleteFileSource.GlobPattern) - } } } @@ -276,44 +228,6 @@ func (m *MutationExecutorImpl) Execute(ctx context.Context, req *vizierpb.Execut } } - if len(fileSourceReqs.Requests) > 0 { - resp, err := m.mdfs.RegisterFileSource(ctx, fileSourceReqs) - if err != nil { - log.WithError(err). - Errorf("Failed to register file sources") - return nil, ErrFileSourceRegistrationFailed - } - if resp.Status != nil && resp.Status.ErrCode != statuspb.OK { - log.WithField("status", resp.Status.String()). - Errorf("Failed to register file sources with bad status") - return resp.Status, ErrFileSourceRegistrationFailed - } - - // Update the internal stat of the file sources. - for _, fs := range resp.FileSources { - id := utils.UUIDFromProtoOrNil(fs.ID) - m.activeFileSources[fs.Name].ID = id - m.activeFileSources[fs.Name].Status = fs.Status - } - } - if len(deleteFileSourcesReq.Names) > 0 { - delResp, err := m.mdfs.RemoveFileSource(ctx, deleteFileSourcesReq) - if err != nil { - log.WithError(err). - Errorf("Failed to delete tracepoints") - return nil, ErrFileSourceDeletionFailed - } - if delResp.Status != nil && delResp.Status.ErrCode != statuspb.OK { - log.WithField("status", delResp.Status.String()). - Errorf("Failed to delete tracepoints with bad status") - return delResp.Status, ErrFileSourceDeletionFailed - } - // Remove the tracepoints we considered deleted. 
- for _, fsName := range deleteFileSourcesReq.Names { - delete(m.activeFileSources, fsName) - } - } - m.outputTables = make([]string, 0) for k := range outputTablesMap { m.outputTables = append(m.outputTables, k) @@ -330,12 +244,6 @@ func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.Muta for _, tp := range m.activeTracepoints { tpReq.IDs = append(tpReq.IDs, utils.ProtoFromUUID(tp.ID)) } - fsReq := &metadatapb.GetFileSourceInfoRequest{ - IDs: make([]*uuidpb.UUID, 0), - } - for _, fs := range m.activeFileSources { - fsReq.IDs = append(fsReq.IDs, utils.ProtoFromUUID(fs.ID)) - } aCtx, err := authcontext.FromContext(ctx) if err != nil { return nil, err @@ -346,14 +254,9 @@ func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.Muta if err != nil { return nil, err } - fsResp, err := m.mdfs.GetFileSourceInfo(ctx, fsReq) - if err != nil { - return nil, err - } - tps := len(tpResp.Tracepoints) mutationInfo := &vizierpb.MutationInfo{ Status: &vizierpb.Status{Code: 0}, - States: make([]*vizierpb.MutationInfo_MutationState, tps+len(fsResp.FileSources)), + States: make([]*vizierpb.MutationInfo_MutationState, len(tpResp.Tracepoints)), } tpReady := true @@ -368,18 +271,6 @@ func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.Muta } } - fsReady := true - for idx, fs := range fsResp.FileSources { - mutationInfo.States[idx+tps] = &vizierpb.MutationInfo_MutationState{ - ID: utils.UUIDFromProtoOrNil(fs.ID).String(), - State: convertLifeCycleStateToVizierLifeCycleState(fs.State), - Name: fs.Name, - } - if fs.State != statuspb.RUNNING_STATE { - fsReady = false - } - } - if !tpReady { mutationInfo.Status = &vizierpb.Status{ Code: int32(codes.Unavailable), @@ -388,14 +279,6 @@ func (m *MutationExecutorImpl) MutationInfo(ctx context.Context) (*vizierpb.Muta return mutationInfo, nil } - if !fsReady { - mutationInfo.Status = &vizierpb.Status{ - Code: int32(codes.Unavailable), - Message: "file source installation in 
progress", - } - return mutationInfo, nil - } - if !m.isSchemaReady() { mutationInfo.Status = &vizierpb.Status{ Code: int32(codes.Unavailable), diff --git a/src/vizier/services/query_broker/controllers/query_executor.go b/src/vizier/services/query_broker/controllers/query_executor.go index 8897034bcaa..4d8ff7b7b6b 100644 --- a/src/vizier/services/query_broker/controllers/query_executor.go +++ b/src/vizier/services/query_broker/controllers/query_executor.go @@ -89,7 +89,6 @@ type DataPrivacy interface { // MutationExecFactory is a function that creates a new MutationExecutorImpl. type MutationExecFactory func(Planner, metadatapb.MetadataTracepointServiceClient, - metadatapb.MetadataFileSourceServiceClient, metadatapb.MetadataConfigServiceClient, *distributedpb.DistributedState) MutationExecutor @@ -101,7 +100,6 @@ type QueryExecutorImpl struct { dataPrivacy DataPrivacy natsConn *nats.Conn mdtp metadatapb.MetadataTracepointServiceClient - mdfs metadatapb.MetadataFileSourceServiceClient mdconf metadatapb.MetadataConfigServiceClient resultForwarder QueryResultForwarder planner Planner @@ -129,7 +127,6 @@ func NewQueryExecutorFromServer(s *Server, mutExecFactory MutationExecFactory) Q s.dataPrivacy, s.natsConn, s.mdtp, - s.mdfs, s.mdconf, s.resultForwarder, s.planner, @@ -145,7 +142,6 @@ func NewQueryExecutor( dataPrivacy DataPrivacy, natsConn *nats.Conn, mdtp metadatapb.MetadataTracepointServiceClient, - mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, resultForwarder QueryResultForwarder, planner Planner, @@ -158,7 +154,6 @@ func NewQueryExecutor( dataPrivacy: dataPrivacy, natsConn: natsConn, mdtp: mdtp, - mdfs: mdfs, mdconf: mdconf, resultForwarder: resultForwarder, planner: planner, @@ -297,7 +292,7 @@ func (q *QueryExecutorImpl) getPlanOpts(queryStr string) (*planpb.PlanOptions, e } func (q *QueryExecutorImpl) runMutation(ctx context.Context, resultCh chan<- *vizierpb.ExecuteScriptResponse, req 
*vizierpb.ExecuteScriptRequest, planOpts *planpb.PlanOptions, distributedState *distributedpb.DistributedState) error { - mutationExec := q.mutationExecFactory(q.planner, q.mdtp, q.mdfs, q.mdconf, distributedState) + mutationExec := q.mutationExecFactory(q.planner, q.mdtp, q.mdconf, distributedState) s, err := mutationExec.Execute(ctx, req, planOpts) if err != nil { diff --git a/src/vizier/services/query_broker/controllers/query_executor_test.go b/src/vizier/services/query_broker/controllers/query_executor_test.go index 710b54c0f28..1bbe5b35b19 100644 --- a/src/vizier/services/query_broker/controllers/query_executor_test.go +++ b/src/vizier/services/query_broker/controllers/query_executor_test.go @@ -409,7 +409,7 @@ func runTestCase(t *testing.T, test *queryExecTestCase) { } dp := &fakeDataPrivacy{} - queryExec := controllers.NewQueryExecutor("qb_address", "qb_hostname", at, dp, nc, nil, nil, nil, rf, planner, test.MutExecFactory) + queryExec := controllers.NewQueryExecutor("qb_address", "qb_hostname", at, dp, nc, nil, nil, rf, planner, test.MutExecFactory) consumer := newTestConsumer(test.ConsumeErrs) assert.Equal(t, test.QueryExecExpectedRunError, queryExec.Run(context.Background(), test.Req, consumer)) @@ -806,7 +806,7 @@ func buildMutationFailedQueryTestCase(t *testing.T) queryExecTestCase { QueryExecExpectedWaitError: err, StreamResultsErr: err, StreamResultsCallExpected: true, - MutExecFactory: func(planner controllers.Planner, client metadatapb.MetadataTracepointServiceClient, client2 metadatapb.MetadataFileSourceServiceClient, client3 metadatapb.MetadataConfigServiceClient, state *distributedpb.DistributedState) controllers.MutationExecutor { + MutExecFactory: func(planner controllers.Planner, client metadatapb.MetadataTracepointServiceClient, client2 metadatapb.MetadataConfigServiceClient, state *distributedpb.DistributedState) controllers.MutationExecutor { return &fakeMutationExecutor{ MutInfo: mutInfo, ExecuteStatus: nil, diff --git 
a/src/vizier/services/query_broker/controllers/server.go b/src/vizier/services/query_broker/controllers/server.go index fae5d15ad91..9626a8046d0 100644 --- a/src/vizier/services/query_broker/controllers/server.go +++ b/src/vizier/services/query_broker/controllers/server.go @@ -82,7 +82,6 @@ type Server struct { healthcheckQuitOnce sync.Once mdtp metadatapb.MetadataTracepointServiceClient - mdfs metadatapb.MetadataFileSourceServiceClient mdconf metadatapb.MetadataConfigServiceClient resultForwarder QueryResultForwarder @@ -96,8 +95,9 @@ type QueryExecutorFactory func(*Server, MutationExecFactory) QueryExecutor // NewServer creates GRPC handlers. func NewServer(env querybrokerenv.QueryBrokerEnv, agentsTracker AgentsTracker, dataPrivacy DataPrivacy, - mds metadatapb.MetadataTracepointServiceClient, mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, - natsConn *nats.Conn, queryExecFactory QueryExecutorFactory) (*Server, error) { + mds metadatapb.MetadataTracepointServiceClient, mdconf metadatapb.MetadataConfigServiceClient, + natsConn *nats.Conn, queryExecFactory QueryExecutorFactory, +) (*Server, error) { var udfInfo udfspb.UDFInfo if err := loadUDFInfo(&udfInfo); err != nil { return nil, err @@ -107,7 +107,7 @@ func NewServer(env querybrokerenv.QueryBrokerEnv, agentsTracker AgentsTracker, d return nil, err } - return NewServerWithForwarderAndPlanner(env, agentsTracker, dataPrivacy, NewQueryResultForwarder(), mds, mdfs, mdconf, + return NewServerWithForwarderAndPlanner(env, agentsTracker, dataPrivacy, NewQueryResultForwarder(), mds, mdconf, natsConn, c, queryExecFactory) } @@ -117,7 +117,6 @@ func NewServerWithForwarderAndPlanner(env querybrokerenv.QueryBrokerEnv, dataPrivacy DataPrivacy, resultForwarder QueryResultForwarder, mds metadatapb.MetadataTracepointServiceClient, - mdfs metadatapb.MetadataFileSourceServiceClient, mdconf metadatapb.MetadataConfigServiceClient, natsConn *nats.Conn, planner Planner, @@ -130,7 +129,6 
@@ func NewServerWithForwarderAndPlanner(env querybrokerenv.QueryBrokerEnv, resultForwarder: resultForwarder, natsConn: natsConn, mdtp: mds, - mdfs: mdfs, mdconf: mdconf, planner: planner, queryExecFactory: queryExecFactory, diff --git a/src/vizier/services/query_broker/controllers/server_test.go b/src/vizier/services/query_broker/controllers/server_test.go index 2d11071385d..1bd568c2631 100644 --- a/src/vizier/services/query_broker/controllers/server_test.go +++ b/src/vizier/services/query_broker/controllers/server_test.go @@ -267,7 +267,7 @@ func TestCheckHealth(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, nil, queryExecFactory) + s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, queryExecFactory) require.NoError(t, err) err = s.CheckHealth(context.Background()) @@ -392,7 +392,7 @@ func TestExecuteScript(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, nil, queryExecFactory) + s, err := controllers.NewServerWithForwarderAndPlanner(nil, nil, dp, nil, nil, nil, nil, nil, queryExecFactory) require.NoError(t, err) // Set up mocks. 
@@ -456,7 +456,7 @@ func TestTransferResultChunk_AgentStreamComplete(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() @@ -547,7 +547,7 @@ func TestTransferResultChunk_AgentClosedPrematurely(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() @@ -631,7 +631,7 @@ func TestTransferResultChunk_AgentStreamFailed(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() @@ -709,7 +709,7 @@ func TestTransferResultChunk_ClientStreamCancelled(t *testing.T) { } dp := &fakeDataPrivacy{} - s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nil, nc, nil, nil) + s, err := controllers.NewServerWithForwarderAndPlanner(env, &at, dp, &rf, nil, nil, nc, nil, nil) require.NoError(t, err) defer s.Close() diff --git a/src/vizier/services/query_broker/query_broker_server.go b/src/vizier/services/query_broker/query_broker_server.go index f4bcff7ff2b..3912ea599af 100644 --- a/src/vizier/services/query_broker/query_broker_server.go +++ b/src/vizier/services/query_broker/query_broker_server.go @@ -144,7 +144,6 @@ func main() { mdsClient := metadatapb.NewMetadataServiceClient(mdsConn) mdtpClient := metadatapb.NewMetadataTracepointServiceClient(mdsConn) - mdfsClient := metadatapb.NewMetadataFileSourceServiceClient(mdsConn) mdconfClient := 
metadatapb.NewMetadataConfigServiceClient(mdsConn) csClient := metadatapb.NewCronScriptStoreServiceClient(mdsConn) @@ -174,7 +173,7 @@ func main() { agentTracker := tracker.NewAgents(mdsClient, viper.GetString("jwt_signing_key")) agentTracker.Start() defer agentTracker.Stop() - svr, err := controllers.NewServer(env, agentTracker, dataPrivacy, mdtpClient, mdfsClient, mdconfClient, natsConn, controllers.NewQueryExecutorFromServer) + svr, err := controllers.NewServer(env, agentTracker, dataPrivacy, mdtpClient, mdconfClient, natsConn, controllers.NewQueryExecutorFromServer) if err != nil { log.WithError(err).Fatal("Failed to initialize GRPC server funcs.") } diff --git a/src/vizier/services/query_broker/tracker/agents_info.go b/src/vizier/services/query_broker/tracker/agents_info.go index f36ebe8dd06..f881c974832 100644 --- a/src/vizier/services/query_broker/tracker/agents_info.go +++ b/src/vizier/services/query_broker/tracker/agents_info.go @@ -128,7 +128,7 @@ func (a *AgentsInfoImpl) UpdateAgentsInfo(update *metadatapb.AgentUpdatesRespons } else { // this is a Kelvin kelvinGRPCAddress := agent.Info.IPAddress - carnotInfoMap[agentUUID] = makeKelvinCarnotInfo(agentUUID, kelvinGRPCAddress, agent.ASID, agent.Info.Capabilities.StoresData) + carnotInfoMap[agentUUID] = makeKelvinCarnotInfo(agentUUID, kelvinGRPCAddress, agent.ASID) } } // case 2: agent data info update @@ -197,14 +197,14 @@ func makeAgentCarnotInfo(agentID uuid.UUID, asid uint32, agentMetadata *distribu } } -func makeKelvinCarnotInfo(agentID uuid.UUID, grpcAddress string, asid uint32, storesData bool) *distributedpb.CarnotInfo { +func makeKelvinCarnotInfo(agentID uuid.UUID, grpcAddress string, asid uint32) *distributedpb.CarnotInfo { return &distributedpb.CarnotInfo{ QueryBrokerAddress: agentID.String(), AgentID: utils.ProtoFromUUID(agentID), ASID: asid, HasGRPCServer: true, GRPCAddress: grpcAddress, - HasDataStore: storesData, + HasDataStore: false, ProcessesData: true, AcceptsRemoteSources: true, // When 
we support persistent storage, Kelvins will also have MetadataInfo. diff --git a/src/vizier/services/shared/agentpb/agent.pb.go b/src/vizier/services/shared/agentpb/agent.pb.go index c7fa65e27bd..4cd57a106ea 100755 --- a/src/vizier/services/shared/agentpb/agent.pb.go +++ b/src/vizier/services/shared/agentpb/agent.pb.go @@ -56,7 +56,6 @@ func (AgentState) EnumDescriptor() ([]byte, []int) { type AgentCapabilities struct { CollectsData bool `protobuf:"varint,1,opt,name=collects_data,json=collectsData,proto3" json:"collects_data,omitempty"` - StoresData bool `protobuf:"varint,2,opt,name=stores_data,json=storesData,proto3" json:"stores_data,omitempty"` } func (m *AgentCapabilities) Reset() { *m = AgentCapabilities{} } @@ -98,13 +97,6 @@ func (m *AgentCapabilities) GetCollectsData() bool { return false } -func (m *AgentCapabilities) GetStoresData() bool { - if m != nil { - return m.StoresData - } - return false -} - type AgentParameters struct { ProfilerStackTraceSamplePeriodMS int32 `protobuf:"varint,1,opt,name=profiler_stack_trace_sample_period_ms,json=profilerStackTraceSamplePeriodMs,proto3" json:"profiler_stack_trace_sample_period_ms,omitempty"` } @@ -491,62 +483,61 @@ func init() { } var fileDescriptor_fef0af3bd5248f34 = []byte{ - // 879 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x41, 0x6f, 0xdb, 0x36, - 0x14, 0xb6, 0xe2, 0x24, 0xb6, 0x5f, 0xe2, 0xc6, 0x65, 0x83, 0xd5, 0xcb, 0x0a, 0x29, 0x70, 0x37, - 0xa0, 0xeb, 0x06, 0x79, 0xc8, 0x80, 0x6d, 0x97, 0x6d, 0xb0, 0x63, 0x77, 0x36, 0xda, 0x29, 0x06, - 0xe5, 0x64, 0xe8, 0x2e, 0x02, 0x2d, 0x31, 0x09, 0x57, 0x59, 0x12, 0x48, 0xc6, 0x28, 0x7a, 0xda, - 0x71, 0xc7, 0xfd, 0x85, 0xdd, 0xf6, 0x53, 0x76, 0xcc, 0xb1, 0x27, 0x63, 0x51, 0x76, 0xe8, 0xb1, - 0x3f, 0x61, 0xd0, 0x93, 0xdc, 0xd4, 0x2d, 0xd0, 0xe4, 0x24, 0xf2, 0x7d, 0xdf, 0xf7, 0x3e, 0xf2, - 0x7b, 0x84, 0x0d, 0xb6, 0x92, 0x7e, 0x7b, 0x26, 0x5e, 0x08, 0x2e, 0xdb, 0x8a, 0xcb, 0x99, 0xf0, - 0xb9, 0x6a, 
0xab, 0x53, 0x26, 0x79, 0xd0, 0x66, 0x27, 0x3c, 0xd2, 0xc9, 0x24, 0xff, 0xda, 0x89, - 0x8c, 0x75, 0x4c, 0xac, 0xe4, 0xb9, 0x9d, 0xd3, 0xed, 0x05, 0xdd, 0xce, 0xe9, 0x36, 0xd2, 0x76, - 0xb6, 0x4f, 0xe2, 0x93, 0x18, 0xb9, 0xed, 0x6c, 0x95, 0xcb, 0x76, 0xac, 0xcc, 0x86, 0x25, 0xa2, - 0x9d, 0x23, 0x67, 0x67, 0x22, 0x48, 0x26, 0xf8, 0xc9, 0x09, 0xad, 0xa7, 0x70, 0xbb, 0x93, 0xe9, - 0xf7, 0x59, 0xc2, 0x26, 0x22, 0x14, 0x5a, 0x70, 0x45, 0xee, 0x43, 0xdd, 0x8f, 0xc3, 0x90, 0xfb, - 0x5a, 0x79, 0x01, 0xd3, 0xac, 0x69, 0xec, 0x1a, 0x0f, 0xaa, 0x74, 0x73, 0x51, 0xec, 0x31, 0xcd, - 0x88, 0x05, 0x1b, 0x4a, 0xc7, 0x92, 0x17, 0x94, 0x15, 0xa4, 0x40, 0x5e, 0xca, 0x08, 0xad, 0x3f, - 0x0c, 0xd8, 0xc2, 0xde, 0x23, 0x26, 0xd9, 0x94, 0x6b, 0x2e, 0x15, 0x39, 0x83, 0xcf, 0x12, 0x19, - 0x1f, 0x8b, 0x90, 0x4b, 0x4f, 0x69, 0xe6, 0x3f, 0xf3, 0xb4, 0x64, 0x3e, 0xf7, 0x14, 0x9b, 0x26, - 0x21, 0xf7, 0x12, 0x2e, 0x45, 0x1c, 0x78, 0x53, 0x85, 0x8e, 0x6b, 0xdd, 0x4f, 0xd3, 0xb9, 0xb5, - 0x3b, 0x2a, 0x04, 0x6e, 0xc6, 0x1f, 0x67, 0x74, 0x17, 0xd9, 0x23, 0x24, 0xff, 0xec, 0xd2, 0xdd, - 0xe4, 0xc3, 0x0c, 0xd5, 0xfa, 0x6f, 0x05, 0x6a, 0x78, 0x94, 0x61, 0x74, 0x1c, 0x93, 0x6f, 0xa1, - 0x8a, 0x99, 0x79, 0x22, 0x40, 0x9f, 0x8d, 0xbd, 0x2d, 0x3b, 0x79, 0x6e, 0xe7, 0xe1, 0xd8, 0x87, - 0x87, 0xc3, 0x5e, 0x77, 0x23, 0x9d, 0x5b, 0x95, 0x5c, 0xd1, 0xa3, 0x15, 0x64, 0x0f, 0x03, 0xf2, - 0x08, 0x6a, 0xa7, 0xb1, 0xd2, 0x9e, 0x88, 0x8e, 0x63, 0xbc, 0xf0, 0xc6, 0xde, 0xe7, 0xf6, 0x35, - 0x83, 0xb1, 0x07, 0xb1, 0x42, 0x5b, 0x5a, 0x3d, 0x2d, 0x56, 0xe4, 0x4b, 0x00, 0x91, 0x78, 0x2c, - 0x08, 0x24, 0x57, 0xaa, 0x59, 0xde, 0x35, 0x1e, 0xd4, 0xba, 0xf5, 0x74, 0x6e, 0xd5, 0x86, 0xa3, - 0x4e, 0x5e, 0xa4, 0x35, 0x91, 0x14, 0x4b, 0x72, 0x04, 0x9b, 0xfe, 0x5b, 0xd3, 0x69, 0xae, 0xa2, - 0xf1, 0xde, 0xb5, 0xc6, 0xef, 0xcd, 0x95, 0x2e, 0xf5, 0x21, 0x23, 0x80, 0xe4, 0xcd, 0x64, 0x9a, - 0x6b, 0xd8, 0xf5, 0xab, 0x9b, 0x75, 0xbd, 0x9a, 0x28, 0x7d, 0xab, 0x47, 0xcb, 0x87, 0xfa, 0x63, - 0x2e, 0x23, 0x1e, 0x1e, 0x71, 0xa9, 0x44, 0x1c, 
0x91, 0x26, 0x54, 0x66, 0xf9, 0x12, 0x83, 0xae, - 0xd3, 0xc5, 0x96, 0x7c, 0x02, 0xb5, 0x29, 0xfb, 0x2d, 0x96, 0x9e, 0xe4, 0x33, 0x8c, 0xb2, 0x4e, - 0xab, 0x58, 0xa0, 0x7c, 0x86, 0xa0, 0x88, 0x0a, 0xb0, 0x5c, 0x80, 0x59, 0x81, 0xf2, 0x59, 0xeb, - 0x95, 0x01, 0xd5, 0x45, 0xa6, 0x64, 0x07, 0x30, 0xd5, 0x88, 0x4d, 0x39, 0x3a, 0xd4, 0xe8, 0x9b, - 0x3d, 0xf9, 0x18, 0xaa, 0x49, 0x1c, 0x78, 0x88, 0xad, 0x20, 0x56, 0x49, 0xe2, 0xc0, 0xc9, 0xa0, - 0xfb, 0x50, 0xc9, 0x07, 0x99, 0x14, 0xe9, 0x43, 0x3a, 0xb7, 0xd6, 0xb1, 0xeb, 0x88, 0xae, 0xe3, - 0x9c, 0x12, 0xf2, 0x08, 0xd6, 0x9f, 0xe1, 0x6d, 0x8a, 0xc4, 0xed, 0x6b, 0xb3, 0x59, 0xba, 0x3c, - 0x2d, 0xd4, 0xe4, 0x3b, 0x68, 0xe6, 0x2b, 0xef, 0x94, 0xb3, 0x80, 0x4b, 0xe5, 0x89, 0x48, 0x69, - 0x16, 0x86, 0x3c, 0xc0, 0xd4, 0xab, 0xf4, 0xa3, 0x1c, 0x1f, 0xe4, 0xf0, 0x70, 0x81, 0xb6, 0xe6, - 0x06, 0xac, 0x61, 0xde, 0xe4, 0x07, 0x58, 0xc5, 0x47, 0x97, 0x3f, 0xd7, 0x87, 0x37, 0x9b, 0x12, - 0xbe, 0x3a, 0xd4, 0x91, 0x6f, 0xe0, 0x96, 0x2f, 0x39, 0xd3, 0xdc, 0xd3, 0x62, 0xca, 0xbd, 0x48, - 0x61, 0x22, 0xe5, 0x6e, 0x23, 0x9d, 0x5b, 0x9b, 0xfb, 0x88, 0x8c, 0xc5, 0x94, 0x3b, 0x2e, 0xdd, - 0xf4, 0xaf, 0x76, 0x8a, 0xfc, 0x08, 0xb7, 0x43, 0xa6, 0x74, 0x76, 0x72, 0xa9, 0x27, 0x9c, 0xe9, - 0x4c, 0x5a, 0x46, 0xe9, 0x9d, 0x74, 0x6e, 0x6d, 0x3d, 0x61, 0x4a, 0x0f, 0x16, 0x98, 0xe3, 0xd2, - 0xad, 0x70, 0xa9, 0xa0, 0xc8, 0x3d, 0x58, 0x65, 0x4a, 0x04, 0x18, 0x61, 0xbd, 0x5b, 0x4d, 0xe7, - 0xd6, 0x6a, 0xc7, 0x1d, 0xf6, 0x28, 0x56, 0x5b, 0x7f, 0x19, 0xb0, 0x81, 0x47, 0x75, 0x35, 0xd3, - 0x67, 0x8a, 0x1c, 0xc0, 0xdd, 0x48, 0x79, 0x4a, 0x44, 0x3e, 0xf7, 0x96, 0x7d, 0xf1, 0xe6, 0xe5, - 0x6e, 0x33, 0x9d, 0x5b, 0xdb, 0x8e, 0xeb, 0x66, 0x8c, 0x25, 0x6f, 0xba, 0x1d, 0xa9, 0xf7, 0xab, - 0xa4, 0x03, 0x6b, 0x4a, 0x33, 0x9d, 0x3f, 0x80, 0x5b, 0x7b, 0x5f, 0xdc, 0x2c, 0xb8, 0xec, 0x34, - 0x9c, 0xe6, 0xca, 0x87, 0x2f, 0x00, 0xae, 0x8a, 0xe4, 0x2e, 0xdc, 0xe9, 0xfc, 0xd4, 0x77, 0xc6, - 0x9e, 0x3b, 0xee, 0x8c, 0xfb, 0xde, 0xa1, 0xf3, 0xd8, 0x39, 0xf8, 0xc5, 0x69, 0x94, 
0xde, 0x05, - 0x06, 0xfd, 0xce, 0x93, 0xf1, 0xe0, 0x69, 0xc3, 0x20, 0xf7, 0xa0, 0xb9, 0xac, 0xa0, 0x7d, 0x77, - 0x74, 0xe0, 0xb8, 0xc3, 0xa3, 0x7e, 0x63, 0xe5, 0x5d, 0xb4, 0x37, 0x74, 0xf7, 0x0f, 0x1c, 0xa7, - 0xbf, 0x3f, 0xee, 0xf7, 0x1a, 0xe5, 0xee, 0xf7, 0xe7, 0x17, 0x66, 0xe9, 0xe5, 0x85, 0x59, 0x7a, - 0x7d, 0x61, 0x1a, 0xbf, 0xa7, 0xa6, 0xf1, 0x77, 0x6a, 0x1a, 0xff, 0xa4, 0xa6, 0x71, 0x9e, 0x9a, - 0xc6, 0xbf, 0xa9, 0x69, 0xbc, 0x4a, 0xcd, 0xd2, 0xeb, 0xd4, 0x34, 0xfe, 0xbc, 0x34, 0x4b, 0xe7, - 0x97, 0x66, 0xe9, 0xe5, 0xa5, 0x59, 0xfa, 0xb5, 0x52, 0xfc, 0x81, 0x4c, 0xd6, 0xf1, 0x37, 0xfe, - 0xeb, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfa, 0x58, 0xba, 0x29, 0x6d, 0x06, 0x00, 0x00, + // 864 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xc6, 0x49, 0x6c, 0x4f, 0xe2, 0xc6, 0x9d, 0x46, 0xd4, 0x84, 0x6a, 0x37, 0x72, 0x41, + 0x2a, 0x05, 0xad, 0x51, 0x90, 0xa0, 0x17, 0x40, 0x76, 0xec, 0x62, 0xab, 0x65, 0x63, 0xcd, 0x3a, + 0x41, 0x70, 0x19, 0x8d, 0x77, 0x27, 0xc9, 0xd0, 0xf5, 0xee, 0x68, 0x66, 0x62, 0x55, 0x3d, 0x71, + 0xe4, 0xc8, 0x5f, 0xe0, 0xc6, 0x4f, 0xe1, 0x98, 0x63, 0x4f, 0x16, 0xd9, 0x70, 0xe8, 0xb1, 0x3f, + 0x01, 0xed, 0xdb, 0x75, 0x53, 0xb7, 0x52, 0x93, 0xd3, 0xbe, 0x79, 0xdf, 0xf7, 0xbd, 0x37, 0xf3, + 0xbd, 0x27, 0x1b, 0xb9, 0x5a, 0x05, 0xed, 0x99, 0x78, 0x21, 0xb8, 0x6a, 0x6b, 0xae, 0x66, 0x22, + 0xe0, 0xba, 0xad, 0x4f, 0x99, 0xe2, 0x61, 0x9b, 0x9d, 0xf0, 0xd8, 0xc8, 0x49, 0xfe, 0x75, 0xa5, + 0x4a, 0x4c, 0x82, 0x1d, 0xf9, 0xdc, 0xcd, 0xe9, 0xee, 0x82, 0xee, 0xe6, 0x74, 0x17, 0x68, 0x3b, + 0xdb, 0x27, 0xc9, 0x49, 0x02, 0xdc, 0x76, 0x16, 0xe5, 0xb2, 0x1d, 0x27, 0x6b, 0xc3, 0xa4, 0x68, + 0xe7, 0xc8, 0xd9, 0x99, 0x08, 0xe5, 0x04, 0x3e, 0x39, 0xa1, 0xf5, 0x08, 0xdd, 0xee, 0x64, 0xfa, + 0x7d, 0x26, 0xd9, 0x44, 0x44, 0xc2, 0x08, 0xae, 0xf1, 0x7d, 0x54, 0x0f, 0x92, 0x28, 0xe2, 0x81, + 0xd1, 0x34, 0x64, 0x86, 0x35, 0xad, 0x5d, 0xeb, 0x41, 0x95, 0x6c, 0x2e, 0x92, 
0x3d, 0x66, 0x58, + 0xeb, 0x0f, 0x0b, 0x6d, 0x81, 0x74, 0xc4, 0x14, 0x9b, 0x72, 0xc3, 0x95, 0xc6, 0x67, 0xe8, 0x33, + 0xa9, 0x92, 0x63, 0x11, 0x71, 0x45, 0xb5, 0x61, 0xc1, 0x33, 0x6a, 0x14, 0x0b, 0x38, 0xd5, 0x6c, + 0x2a, 0x23, 0x4e, 0x25, 0x57, 0x22, 0x09, 0xe9, 0x54, 0x43, 0xc1, 0xb5, 0xee, 0xa7, 0xe9, 0xdc, + 0xd9, 0x1d, 0x15, 0x02, 0x3f, 0xe3, 0x8f, 0x33, 0xba, 0x0f, 0xec, 0x11, 0x90, 0x7f, 0xf2, 0xc9, + 0xae, 0xfc, 0x30, 0x43, 0xb7, 0xfe, 0x5b, 0x41, 0x35, 0xb8, 0xca, 0x30, 0x3e, 0x4e, 0xf0, 0xb7, + 0xa8, 0x0a, 0x96, 0x50, 0x11, 0x42, 0x9f, 0x8d, 0xbd, 0x2d, 0x57, 0x3e, 0x77, 0xf3, 0xb7, 0xbb, + 0x87, 0x87, 0xc3, 0x5e, 0x77, 0x23, 0x9d, 0x3b, 0x95, 0x5c, 0xd1, 0x23, 0x15, 0x60, 0x0f, 0x43, + 0xfc, 0x18, 0xd5, 0x4e, 0x13, 0x6d, 0xa8, 0x88, 0x8f, 0x93, 0xe6, 0x0a, 0x28, 0x3f, 0x77, 0xaf, + 0xf1, 0xdd, 0x1d, 0x24, 0x1a, 0xda, 0x92, 0xea, 0x69, 0x11, 0xe1, 0x2f, 0x11, 0x12, 0x92, 0xb2, + 0x30, 0x54, 0x5c, 0xeb, 0x66, 0x79, 0xd7, 0x7a, 0x50, 0xeb, 0xd6, 0xd3, 0xb9, 0x53, 0x1b, 0x8e, + 0x3a, 0x79, 0x92, 0xd4, 0x84, 0x2c, 0x42, 0x7c, 0x84, 0x36, 0x83, 0xb7, 0xcc, 0x6f, 0xae, 0x42, + 0xe3, 0xbd, 0x6b, 0x1b, 0xbf, 0x37, 0x36, 0xb2, 0x54, 0x07, 0x8f, 0x10, 0x92, 0x6f, 0x26, 0xd3, + 0x5c, 0x83, 0xaa, 0x5f, 0xdd, 0xac, 0xea, 0xd5, 0x44, 0xc9, 0x5b, 0x35, 0x5a, 0x01, 0xaa, 0x3f, + 0xe1, 0x2a, 0xe6, 0xd1, 0x11, 0x57, 0x5a, 0x24, 0x31, 0x6e, 0xa2, 0xca, 0x2c, 0x0f, 0xc1, 0xe8, + 0x3a, 0x59, 0x1c, 0xf1, 0x27, 0xa8, 0x36, 0x65, 0xbf, 0x25, 0x8a, 0x2a, 0x3e, 0x03, 0x2b, 0xeb, + 0xa4, 0x0a, 0x09, 0xc2, 0x67, 0x00, 0x8a, 0xb8, 0x00, 0xcb, 0x05, 0x98, 0x25, 0x08, 0x9f, 0xb5, + 0x5e, 0x59, 0xa8, 0xba, 0xf0, 0x14, 0xef, 0x20, 0x70, 0x35, 0x66, 0x53, 0x0e, 0x1d, 0x6a, 0xe4, + 0xcd, 0x19, 0x7f, 0x8c, 0xaa, 0x32, 0x09, 0x29, 0x60, 0x2b, 0x80, 0x55, 0x64, 0x12, 0x7a, 0x19, + 0x74, 0x1f, 0x55, 0xf2, 0x41, 0xca, 0xc2, 0x7d, 0x94, 0xce, 0x9d, 0x75, 0xa8, 0x3a, 0x22, 0xeb, + 0x30, 0x27, 0x89, 0x1f, 0xa3, 0xf5, 0x67, 0xf0, 0x9a, 0xc2, 0x71, 0xf7, 0x5a, 0x6f, 0x96, 0x1e, + 0x4f, 0x0a, 0x35, 
0x7e, 0x84, 0x9a, 0x79, 0x44, 0x4f, 0x39, 0x0b, 0xb9, 0xd2, 0x54, 0xc4, 0xda, + 0xb0, 0x28, 0xe2, 0x21, 0xb8, 0x5e, 0x25, 0x1f, 0xe5, 0xf8, 0x20, 0x87, 0x87, 0x0b, 0xb4, 0x35, + 0xb7, 0xd0, 0x1a, 0xf8, 0x8d, 0xbf, 0x47, 0xab, 0xb0, 0x74, 0xf9, 0xba, 0x3e, 0xbc, 0xd9, 0x94, + 0x60, 0xeb, 0x40, 0x87, 0xbf, 0x41, 0xb7, 0x02, 0xc5, 0x99, 0xe1, 0xd4, 0x88, 0x29, 0xa7, 0xb1, + 0x06, 0x47, 0xca, 0xdd, 0x46, 0x3a, 0x77, 0x36, 0xf7, 0x01, 0x19, 0x8b, 0x29, 0xf7, 0x7c, 0xb2, + 0x19, 0x5c, 0x9d, 0x34, 0xfe, 0x01, 0xdd, 0x8e, 0x98, 0x36, 0xd9, 0xcd, 0x95, 0x99, 0x70, 0x66, + 0x32, 0x69, 0x19, 0xa4, 0x77, 0xd2, 0xb9, 0xb3, 0xf5, 0x94, 0x69, 0x33, 0x58, 0x60, 0x9e, 0x4f, + 0xb6, 0xa2, 0xa5, 0x84, 0xc6, 0xf7, 0xd0, 0x2a, 0xd3, 0x22, 0x04, 0x0b, 0xeb, 0xdd, 0x6a, 0x3a, + 0x77, 0x56, 0x3b, 0xfe, 0xb0, 0x47, 0x20, 0xdb, 0xfa, 0xcb, 0x42, 0x1b, 0x70, 0x55, 0xdf, 0x30, + 0x73, 0xa6, 0xf1, 0x01, 0xba, 0x1b, 0x6b, 0xaa, 0x45, 0x1c, 0x70, 0xba, 0xdc, 0x17, 0x5e, 0x5e, + 0xee, 0x36, 0xd3, 0xb9, 0xb3, 0xed, 0xf9, 0x7e, 0xc6, 0x58, 0xea, 0x4d, 0xb6, 0x63, 0xfd, 0x7e, + 0x16, 0x77, 0xd0, 0x9a, 0x36, 0xcc, 0xe4, 0x0b, 0x70, 0x6b, 0xef, 0x8b, 0x9b, 0x19, 0x97, 0xdd, + 0x86, 0x93, 0x5c, 0xf9, 0xf0, 0x05, 0x42, 0x57, 0x49, 0x7c, 0x17, 0xdd, 0xe9, 0xfc, 0xd8, 0xf7, + 0xc6, 0xd4, 0x1f, 0x77, 0xc6, 0x7d, 0x7a, 0xe8, 0x3d, 0xf1, 0x0e, 0x7e, 0xf6, 0x1a, 0xa5, 0x77, + 0x81, 0x41, 0xbf, 0xf3, 0x74, 0x3c, 0xf8, 0xa5, 0x61, 0xe1, 0x7b, 0xa8, 0xb9, 0xac, 0x20, 0x7d, + 0x7f, 0x74, 0xe0, 0xf9, 0xc3, 0xa3, 0x7e, 0x63, 0xe5, 0x5d, 0xb4, 0x37, 0xf4, 0xf7, 0x0f, 0x3c, + 0xaf, 0xbf, 0x3f, 0xee, 0xf7, 0x1a, 0xe5, 0xee, 0x77, 0xe7, 0x17, 0x76, 0xe9, 0xe5, 0x85, 0x5d, + 0x7a, 0x7d, 0x61, 0x5b, 0xbf, 0xa7, 0xb6, 0xf5, 0x77, 0x6a, 0x5b, 0xff, 0xa4, 0xb6, 0x75, 0x9e, + 0xda, 0xd6, 0xbf, 0xa9, 0x6d, 0xbd, 0x4a, 0xed, 0xd2, 0xeb, 0xd4, 0xb6, 0xfe, 0xbc, 0xb4, 0x4b, + 0xe7, 0x97, 0x76, 0xe9, 0xe5, 0xa5, 0x5d, 0xfa, 0xb5, 0x52, 0xfc, 0x3f, 0x4c, 0xd6, 0xe1, 0x27, + 0xfc, 0xeb, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe9, 
0xec, 0x47, 0x21, 0x4c, 0x06, 0x00, 0x00, } func (x AgentState) String() string { @@ -578,9 +569,6 @@ func (this *AgentCapabilities) Equal(that interface{}) bool { if this.CollectsData != that1.CollectsData { return false } - if this.StoresData != that1.StoresData { - return false - } return true } func (this *AgentParameters) Equal(that interface{}) bool { @@ -773,10 +761,9 @@ func (this *AgentCapabilities) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 5) s = append(s, "&agentpb.AgentCapabilities{") s = append(s, "CollectsData: "+fmt.Sprintf("%#v", this.CollectsData)+",\n") - s = append(s, "StoresData: "+fmt.Sprintf("%#v", this.StoresData)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -894,16 +881,6 @@ func (m *AgentCapabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.StoresData { - i-- - if m.StoresData { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } if m.CollectsData { i-- if m.CollectsData { @@ -1230,9 +1207,6 @@ func (m *AgentCapabilities) Size() (n int) { if m.CollectsData { n += 2 } - if m.StoresData { - n += 2 - } return n } @@ -1372,7 +1346,6 @@ func (this *AgentCapabilities) String() string { } s := strings.Join([]string{`&AgentCapabilities{`, `CollectsData:` + fmt.Sprintf("%v", this.CollectsData) + `,`, - `StoresData:` + fmt.Sprintf("%v", this.StoresData) + `,`, `}`, }, "") return s @@ -1508,26 +1481,6 @@ func (m *AgentCapabilities) Unmarshal(dAtA []byte) error { } } m.CollectsData = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StoresData", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAgent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StoresData = bool(v != 0) default: iNdEx = preIndex skippy, err := 
skipAgent(dAtA[iNdEx:]) diff --git a/src/vizier/services/shared/agentpb/agent.proto b/src/vizier/services/shared/agentpb/agent.proto index 1e7586d039e..b95cb1def0f 100644 --- a/src/vizier/services/shared/agentpb/agent.proto +++ b/src/vizier/services/shared/agentpb/agent.proto @@ -28,7 +28,6 @@ import "src/api/proto/uuidpb/uuid.proto"; // AgentCapabilities describes functions that the agent has available. message AgentCapabilities { bool collects_data = 1; - bool stores_data = 2; } message AgentParameters { diff --git a/vizier-chart/Chart.yaml b/vizier-chart/Chart.yaml deleted file mode 100644 index b91fc292c74..00000000000 --- a/vizier-chart/Chart.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v2 -name: vizier-chart -type: application -version: 0.14.15 diff --git a/vizier-chart/helm-install.sh b/vizier-chart/helm-install.sh deleted file mode 100644 index 5d14b60d58a..00000000000 --- a/vizier-chart/helm-install.sh +++ /dev/null @@ -1,40 +0,0 @@ - -#First you install pixie via px deploy, then run this script -kubectl get secret -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite secret {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get svc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl --overwrite svc {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get sa -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl sa --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get cm -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl cm --overwrite {} 
meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get pvc -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl pvc --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get clusterrole -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrole --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get clusterrolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl clusterrolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get role -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl role --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get rolebinding -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl rolebinding --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get ds -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl ds --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get deployment -n pl -o json | jq -r '.items[] | select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl deployment --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl -kubectl get statefulset -n pl -o json | jq -r '.items[] | 
select(.metadata.annotations | has("helm.sh/release-name") | not) | .metadata.name' | xargs -I {} kubectl annotate -n pl statefulset --overwrite {} meta.helm.sh/release-name=pixie meta.helm.sh/release-namespace=pl - - - -kubectl get sa -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl sa --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get svc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl svc --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get secret -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl secret --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get cm -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl cm --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get pvc -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl pvc --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get clusterrole -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrole --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get clusterrolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl clusterrolebinding --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get role -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl role --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get 
rolebinding -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl rolebinding --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get ds -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl ds --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get deployment -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl deployment --overwrite {} app.kubernetes.io/managed-by=Helm -kubectl get statefulset -n pl -o json | jq '.items[] | .metadata | select(.labels."app.kubernetes.io/managed-by=Helm" | not) | .name' | xargs -I {} kubectl label -n pl statefulset --overwrite {} app.kubernetes.io/managed-by=Helm - -keyid=f60a3c55-91fe-4dbc-b984-bf6ed4fdc323 -key=$(px api-key get $keyid) -if [ ! -f myvalues.yaml ]; then - echo "Error: myvalues.yaml not found" - exit 1 -fi - -helm upgrade --install pixie . 
--namespace pl --create-namespace --values myvalues.yaml - - diff --git a/vizier-chart/templates/00_secrets.yaml b/vizier-chart/templates/00_secrets.yaml deleted file mode 100644 index f87370a1825..00000000000 --- a/vizier-chart/templates/00_secrets.yaml +++ /dev/null @@ -1,100 +0,0 @@ ---- -apiVersion: v1 -data: - PL_CLOUD_ADDR: {{ if .Values.cloudAddr }}"{{ .Values.cloudAddr }}"{{ else }}"withpixie.ai:443"{{ end }} - PL_CLUSTER_NAME: "{{ .Values.clusterName }}" - PL_UPDATE_CLOUD_ADDR: {{ if .Values.cloudUpdateAddr }}"{{ .Values.cloudUpdateAddr }}"{{ else }}"withpixie.ai:443"{{ end }} -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - name: pl-cloud-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CUSTOM_ANNOTATIONS: "{{ .Values.customAnnotations }}" - PL_CUSTOM_LABELS: "{{ .Values.customLabels }}" - PL_DISABLE_AUTO_UPDATE: {{ if .Values.disableAutoUpdate }}"{{ .Values.disableAutoUpdate }}"{{ else }}"false"{{ end }} - PL_ETCD_OPERATOR_ENABLED: {{ if .Values.useEtcdOperator }}"true"{{else}}"false"{{end}} - PL_MD_ETCD_SERVER: https://pl-etcd-client.{{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }}.svc:2379 - PX_MEMORY_LIMIT: "{{ .Values.pemMemoryLimit }}" - PX_MEMORY_REQUEST: "{{ .Values.pemMemoryRequest }}" -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ 
$kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - name: pl-cluster-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: Secret -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - name: pl-cluster-secrets - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -stringData: - sentry-dsn: "{{ .Values.sentryDSN }}" ---- -apiVersion: v1 -kind: Secret -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - name: pl-deploy-secrets - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -stringData: - deploy-key: "{{ .Values.deployKey }}" diff --git a/vizier-chart/templates/01_nats.yaml b/vizier-chart/templates/01_nats.yaml deleted file mode 100644 index 29aedb8877f..00000000000 --- a/vizier-chart/templates/01_nats.yaml +++ /dev/null @@ -1,246 +0,0 @@ ---- -apiVersion: v1 
-data: - nats.conf: | - pid_file: "/var/run/nats/nats.pid" - http: 8222 - - tls { - ca_file: "/etc/nats-server-tls-certs/ca.crt", - cert_file: "/etc/nats-server-tls-certs/server.crt", - key_file: "/etc/nats-server-tls-certs/server.key", - timeout: 3 - verify: true - } -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - name: nats-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - name: pl-nats - name: pl-nats - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: client - port: 4222 - selector: - app: pl-monitoring - name: pl-nats ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element 
-}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - name: pl-nats - name: pl-nats-mgmt - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - clusterIP: None - ports: - - name: cluster - port: 6222 - - name: monitor - port: 8222 - - name: metrics - port: 7777 - - name: leafnodes - port: 7422 - - name: gateways - port: 7522 - selector: - app: pl-monitoring - name: pl-nats ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - name: pl-nats - name: pl-nats - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - name: pl-nats - serviceName: pl-nats - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - name: pl-nats - plane: control - spec: - containers: - - command: - - nats-server - - --config - - /etc/nats-config/nats.conf - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace 
- - name: CLUSTER_ADVERTISE - value: $(POD_NAME).pl-nats.$(POD_NAMESPACE).svc - image: '{{ if .Values.registry }}{{ .Values.registry }}/gcr.io-pixie-oss-pixie-prod-vizier-deps-nats:2.9.19-scratch@sha256:5de59286eb54ead4d4a9279846098d4097b9c17a3c0588182398a7250cde1af9{{else}}gcr.io/pixie-oss/pixie-prod/vizier-deps/nats:2.9.19-scratch@sha256:5de59286eb54ead4d4a9279846098d4097b9c17a3c0588182398a7250cde1af9{{end}}' - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - /nats-server -sl=ldm=/var/run/nats/nats.pid && /bin/sleep 60 - livenessProbe: - httpGet: - path: / - port: 8222 - initialDelaySeconds: 10 - timeoutSeconds: 5 - name: pl-nats - ports: - - containerPort: 4222 - name: client - - containerPort: 7422 - name: leafnodes - - containerPort: 6222 - name: cluster - - containerPort: 8222 - name: monitor - - containerPort: 7777 - name: metrics - readinessProbe: - httpGet: - path: / - port: 8222 - initialDelaySeconds: 10 - timeoutSeconds: 5 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/nats-config - name: config-volume - - mountPath: /etc/nats-server-tls-certs - name: nats-server-tls-volume - - mountPath: /var/run/nats - name: pid - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - shareProcessNamespace: true - terminationGracePeriodSeconds: 60 - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: nats-server-tls-volume - secret: - secretName: service-tls-certs - - configMap: - name: nats-config - name: config-volume - - emptyDir: {} - name: pid diff --git 
a/vizier-chart/templates/02_etcd.yaml b/vizier-chart/templates/02_etcd.yaml deleted file mode 100644 index 4f514ee8aaa..00000000000 --- a/vizier-chart/templates/02_etcd.yaml +++ /dev/null @@ -1,238 +0,0 @@ -{{if .Values.useEtcdOperator}} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - etcd_cluster: pl-etcd - name: pl-etcd - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - clusterIP: None - ports: - - name: client - port: 2379 - - name: peer - port: 2380 - publishNotReadyAddresses: true - selector: - app: pl-monitoring - etcd_cluster: pl-etcd ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - etcd_cluster: pl-etcd - name: pl-etcd-client - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: etcd-client - port: 2379 - selector: - app: pl-monitoring - etcd_cluster: pl-etcd ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" 
$element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - etcd_cluster: pl-etcd - name: pl-etcd - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - podManagementPolicy: Parallel - replicas: 3 - selector: - matchLabels: - app: pl-monitoring - etcd_cluster: pl-etcd - serviceName: pl-etcd - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - etcd_cluster: pl-etcd - plane: control - name: pl-etcd - spec: - containers: - - env: - - name: INITIAL_CLUSTER_SIZE - value: "3" - - name: CLUSTER_NAME - value: pl-etcd - - name: ETCDCTL_API - value: "3" - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: DATA_DIR - value: /var/run/etcd - - name: ETCD_AUTO_COMPACTION_RETENTION - value: "5" - - name: ETCD_AUTO_COMPACTION_MODE - value: revision - image: '{{ if .Values.registry }}{{ .Values.registry }}/gcr.io-pixie-oss-pixie-dev-public-etcd:3.5.9@sha256:e18afc6dda592b426834342393c4c4bd076cb46fa7e10fa7818952cae3047ca9{{else}}gcr.io/pixie-oss/pixie-dev-public/etcd:3.5.9@sha256:e18afc6dda592b426834342393c4c4bd076cb46fa7e10fa7818952cae3047ca9{{end}}' - lifecycle: - preStop: - exec: - command: - - /etc/etcd/scripts/prestop.sh - livenessProbe: - exec: - command: - - /etc/etcd/scripts/healthcheck.sh - 
failureThreshold: 5 - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - name: etcd - ports: - - containerPort: 2379 - name: client - - containerPort: 2380 - name: server - readinessProbe: - exec: - command: - - /etc/etcd/scripts/healthcheck.sh - failureThreshold: 3 - initialDelaySeconds: 1 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 5 - securityContext: - capabilities: - add: - - NET_RAW - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /var/run/etcd - name: etcd-data - - mountPath: /etc/etcdtls/member/peer-tls - name: member-peer-tls - - mountPath: /etc/etcdtls/member/server-tls - name: member-server-tls - - mountPath: /etc/etcdtls/client/etcd-tls - name: etcd-client-tls - securityContext: - seccompProfile: - type: RuntimeDefault - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: member-peer-tls - secret: - secretName: etcd-peer-tls-certs - - name: member-server-tls - secret: - secretName: etcd-server-tls-certs - - name: etcd-client-tls - secret: - secretName: etcd-client-tls-certs - - emptyDir: {} - name: etcd-data ---- -apiVersion: {{ if .Values.useBetaPdbVersion }}"policy/v1beta1"{{ else }}"policy/v1"{{ end }} -kind: PodDisruptionBudget -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - 
{{end}}{{end}} - app: pl-monitoring - name: pl-etcd-pdb - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - minAvailable: 51% - selector: - matchLabels: - app: pl-monitoring - etcd_cluster: pl-etcd - -{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/03_vizier_etcd.yaml b/vizier-chart/templates/03_vizier_etcd.yaml deleted file mode 100644 index 87ccb522974..00000000000 --- a/vizier-chart/templates/03_vizier_etcd.yaml +++ /dev/null @@ -1,2309 +0,0 @@ -{{if and (not .Values.autopilot) .Values.useEtcdOperator}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - 
{{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-ns-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - services - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - secrets - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - 
patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - pods - - services - - persistentvolumes - - persistentvolumeclaims - - serviceaccounts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - - metadata-election - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers/status - verbs: - 
- get - - list - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - - rolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-vizier-crd-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - px.dev - resources: - - viziers - - viziers/status - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - metadata-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - nodes - - pods - - services - - endpoints - - namespaces - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - namespaces - verbs: - - watch - - get - - list -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - watch - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cert-provisioner-role -subjects: -- kind: ServiceAccount - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if 
eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cloud-connector-ns-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-updater-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: 
pl-monitoring - component: vizier - name: pl-vizier-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: 
- apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-metadata-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-query-broker-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - 
name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-cloud-connector-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace 
}}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-updater-cluster-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-vizier-metadata -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: 
ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt 
-kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin-service - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - clusterIP: None - ports: - - name: tcp-http2 - port: 59300 - protocol: TCP - targetPort: 59300 - selector: - app: pl-monitoring - component: vizier - name: kelvin - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - 
vizier-bootstrap: "true" - name: vizier-cloud-connector-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50800 - protocol: TCP - targetPort: 50800 - selector: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50400 - protocol: TCP - targetPort: 50400 - selector: - app: pl-monitoring - component: vizier - name: vizier-metadata - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50300 - protocol: TCP - targetPort: 50300 - - name: tcp-grpc-web - port: 50305 - protocol: TCP - targetPort: 50305 
- selector: - app: pl-monitoring - component: vizier - name: vizier-query-broker - type: ClusterIP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: kelvin - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: 
sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_HOST_PATH - value: /host - - name: PL_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: app - ports: - - containerPort: 59300 - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - - mountPath: /sys - name: sys - readOnly: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: 
RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 30 - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - hostPath: - path: /sys - type: Directory - name: sys ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - 
component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - plane: control - vizier-bootstrap: "true" - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_DEPLOY_KEY - valueFrom: - secretKeyRef: - key: deploy-key - name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD 
- value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cloud-connector-tls-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50800 - scheme: HTTPS - name: app - ports: - - containerPort: 50800 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: cloud-conn-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: 
kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-metadata - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50400" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: 
PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - - name: PL_MD_ETCD_SERVER - value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 - - name: PL_ETCD_OPERATOR_ENABLED - value: "true" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 120 - periodSeconds: 10 - name: app - readinessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 30 - periodSeconds: 10 - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - - command: - - sh - - -c - - set -xe; ETCD_PATH="${PL_MD_ETCD_SERVER}"; URL="${ETCD_PATH}${HEALTH_PATH}"; - until [ $(curl --cacert /certs/ca.crt --key /certs/client.key --cert /certs/client.crt - -m 0.5 -s -o /dev/null -w 
"%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; - env: - - name: HEALTH_PATH - value: /health - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MD_ETCD_SERVER - value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: etcd-wait - volumeMounts: - - mountPath: /certs - name: certs - serviceAccountName: metadata-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-query-broker - template: - metadata: - annotations: - {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50300" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_IP_ADDRESS - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_CLOUD_ADDR - valueFrom: - configMapKeyRef: - key: PL_CLOUD_ADDR - name: pl-cloud-config - - name: PL_DATA_ACCESS - value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50300 - scheme: HTTPS - name: app - 
ports: - - containerPort: 50300 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-metadata-svc - - name: SERVICE_PORT - value: "50400" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: mds-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault 
- serviceAccountName: query-broker-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - configMap: - name: proxy-envoy-config - name: envoy-yaml ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-pem - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: 
kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - args: [] - env: - - name: PL_PEM_ENV_VAR_PLACEHOLDER - value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. - {{- range $key, $value := .Values.customPEMFlags}} - - name: {{$key}} - value: "{{$value}}" - {{- end}} - {{- if .Values.datastreamBufferSpikeSize }} - - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE - value: "{{ .Values.datastreamBufferSpikeSize }}" - {{- end}} - {{- if .Values.datastreamBufferSize }} - - name: PL_DATASTREAM_BUFFER_SIZE - value: "{{ .Values.datastreamBufferSize }}" - {{- end}} - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - - name: PL_CLIENT_TLS_CERT - value: /certs/client.crt - - name: PL_CLIENT_TLS_KEY - value: /certs/client.key - - name: PL_TLS_CA_CERT - value: /certs/ca.crt - - name: PL_DISABLE_SSL - value: "false" - - name: PL_HOST_PATH - value: /host - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_CLOCK_CONVERTER - value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: pem - resources: - limits: - memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} - requests: - memory: {{ 
if .Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} - securityContext: - capabilities: - add: - - SYS_PTRACE - - SYS_ADMIN - privileged: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /host - name: host-root - readOnly: true - - mountPath: /sys - name: sys - readOnly: true - - mountPath: /certs - name: certs - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - hostPID: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - volumes: - - hostPath: - path: / - type: Directory - name: host-root - - hostPath: - path: /sys - type: Directory - name: sys - - name: certs - secret: - secretName: service-tls-certs - updateStrategy: - rollingUpdate: - maxUnavailable: 20 - type: RollingUpdate ---- -apiVersion: batch/v1 -kind: Job -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if 
eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - backoffLimit: 1 - completions: 1 - parallelism: 1 - template: - metadata: - labels: - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - spec: - containers: - - env: - - name: PL_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: provisioner - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - restartPolicy: Never - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: pl-cert-provisioner-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - -{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/04_vizier_persistent.yaml b/vizier-chart/templates/04_vizier_persistent.yaml deleted file mode 100644 index 1f306913ac2..00000000000 --- a/vizier-chart/templates/04_vizier_persistent.yaml +++ /dev/null @@ 
-1,2343 +0,0 @@ -{{if and (not .Values.autopilot) (not .Values.useEtcdOperator)}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - 
name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - 
app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-ns-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - services - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - secrets - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ 
$kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - pods - - services - - persistentvolumes - - persistentvolumeclaims - - serviceaccounts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - - metadata-election - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers/status - verbs: - - get - - list - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - - rolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - 
{{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-vizier-crd-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - px.dev - resources: - - viziers - - viziers/status - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - metadata-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - 
{{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole 
-metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - nodes - - pods - - services - - endpoints - - namespaces - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - namespaces - verbs: - - watch - - get - - list -- apiGroups: - - apps - 
resources: - - replicasets - - deployments - verbs: - - watch - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cert-provisioner-role -subjects: -- kind: ServiceAccount - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cloud-connector-ns-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else 
}}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-updater-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range 
$element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-metadata-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ 
$kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-query-broker-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," 
.Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-cloud-connector-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 
}}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-updater-cluster-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-vizier-metadata -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - 
component: vizier - name: pl-vizier-metadata-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - 
component: vizier - name: pl-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin-service - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - clusterIP: None - ports: - - name: tcp-http2 - port: 59300 - protocol: TCP - targetPort: 59300 - selector: - app: pl-monitoring - component: vizier - name: kelvin - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50800 - protocol: TCP - targetPort: 50800 - selector: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50400 - protocol: TCP - targetPort: 50400 - selector: - app: pl-monitoring - component: vizier - name: vizier-metadata - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50300 - protocol: TCP - targetPort: 50300 - - name: tcp-grpc-web - port: 50305 - protocol: TCP - targetPort: 50305 - selector: - app: pl-monitoring - component: vizier - name: vizier-query-broker - type: ClusterIP ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - 
{{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-pv-claim - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 16Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: kelvin - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - 
containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_HOST_PATH - value: /host - - name: PL_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: app - ports: - - containerPort: 59300 - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - - mountPath: /sys - name: sys - readOnly: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 30 - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - hostPath: - path: /sys - type: Directory - name: sys ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - plane: control - vizier-bootstrap: "true" - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - 
secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_DEPLOY_KEY - valueFrom: - secretKeyRef: - key: deploy-key - name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cloud-connector-tls-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50800 - scheme: HTTPS - name: app - ports: - - containerPort: 50800 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - 
seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: cloud-conn-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-query-broker - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50300" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: 
vizier-query-broker - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_IP_ADDRESS - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_CLOUD_ADDR - valueFrom: - configMapKeyRef: - key: PL_CLOUD_ADDR - name: pl-cloud-config - - name: PL_DATA_ACCESS - value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50300 - scheme: HTTPS - name: app - ports: - - containerPort: 50300 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: 
SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-metadata-svc - - name: SERVICE_PORT - value: "50400" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: mds-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: query-broker-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - configMap: - name: proxy-envoy-config - name: envoy-yaml ---- 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-metadata - serviceName: vizier-metadata - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50400" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - 
name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - - name: PL_ETCD_OPERATOR_ENABLED - value: "false" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 120 - periodSeconds: 10 - name: app - readinessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 30 - periodSeconds: 10 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - - mountPath: /metadata - name: metadata-volume - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: metadata-service-account - tolerations: - 
- effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - name: metadata-volume - persistentVolumeClaim: - claimName: metadata-pv-claim - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-pem - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - 
operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - args: [] - env: - - name: PL_PEM_ENV_VAR_PLACEHOLDER - value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. - {{- range $key, $value := .Values.customPEMFlags}} - - name: {{$key}} - value: "{{$value}}" - {{- end}} - {{- if .Values.datastreamBufferSpikeSize }} - - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE - value: "{{ .Values.datastreamBufferSpikeSize }}" - {{- end}} - {{- if .Values.datastreamBufferSize }} - - name: PL_DATASTREAM_BUFFER_SIZE - value: "{{ .Values.datastreamBufferSize }}" - {{- end}} - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - - name: PL_CLIENT_TLS_CERT - value: /certs/client.crt - - name: PL_CLIENT_TLS_KEY - value: /certs/client.key - - name: PL_TLS_CA_CERT - value: /certs/ca.crt - - name: PL_DISABLE_SSL - value: "false" - - name: PL_HOST_PATH - value: /host - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_CLOCK_CONVERTER - value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: pem - resources: - limits: - memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} - requests: - memory: {{ if 
.Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} - securityContext: - capabilities: - add: - - SYS_PTRACE - - SYS_ADMIN - privileged: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /host - name: host-root - readOnly: true - - mountPath: /sys - name: sys - readOnly: true - - mountPath: /certs - name: certs - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - hostPID: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - volumes: - - hostPath: - path: / - type: Directory - name: host-root - - hostPath: - path: /sys - type: Directory - name: sys - - name: certs - secret: - secretName: service-tls-certs - updateStrategy: - rollingUpdate: - maxUnavailable: 20 - type: RollingUpdate ---- -apiVersion: batch/v1 -kind: Job -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq 
(len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - backoffLimit: 1 - completions: 1 - parallelism: 1 - template: - metadata: - labels: - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - spec: - containers: - - env: - - name: PL_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: provisioner - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - restartPolicy: Never - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: pl-cert-provisioner-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - -{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/05_vizier_etcd_ap.yaml b/vizier-chart/templates/05_vizier_etcd_ap.yaml deleted file mode 100644 index 3c246bd3b11..00000000000 --- a/vizier-chart/templates/05_vizier_etcd_ap.yaml +++ /dev/null @@ -1,2330 +0,0 
@@ -{{if and (.Values.autopilot) (.Values.useEtcdOperator)}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: 
pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: 
pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-ns-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - services - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - secrets - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 
}}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - pods - - services - - persistentvolumes - - persistentvolumeclaims - - serviceaccounts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - - metadata-election - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers/status - verbs: - - get - - list - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - - rolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-vizier-crd-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - px.dev - resources: - - viziers - - viziers/status - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - metadata-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - 
{{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole 
-metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - nodes - - pods - - services - - endpoints - - namespaces - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - namespaces - verbs: - - watch - - get - - list -- apiGroups: - - apps - 
resources: - - replicasets - - deployments - verbs: - - watch - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cert-provisioner-role -subjects: -- kind: ServiceAccount - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cloud-connector-ns-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else 
}}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-updater-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range 
$element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-metadata-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ 
$kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-query-broker-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," 
.Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-cloud-connector-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 
}}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-updater-cluster-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-vizier-metadata -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - 
component: vizier - name: pl-vizier-metadata-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - 
component: vizier - name: pl-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin-service - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - clusterIP: None - ports: - - name: tcp-http2 - port: 59300 - protocol: TCP - targetPort: 59300 - selector: - app: pl-monitoring - component: vizier - name: kelvin - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50800 - protocol: TCP - targetPort: 50800 - selector: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," 
.Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50400 - protocol: TCP - targetPort: 50400 - selector: - app: pl-monitoring - component: vizier - name: vizier-metadata - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50300 - protocol: TCP - targetPort: 50300 - - name: tcp-grpc-web - port: 50305 - protocol: TCP - targetPort: 50305 - selector: - app: pl-monitoring - component: vizier - name: vizier-query-broker - type: ClusterIP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv 
:= split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: kelvin - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_HOST_PATH - value: /host - - name: PL_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - 
valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: app - ports: - - containerPort: 59300 - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - - mountPath: /sys - name: sys - readOnly: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 30 - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - hostPath: - path: /sys - type: Directory - name: sys ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element 
:= split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - plane: control - vizier-bootstrap: "true" - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_DEPLOY_KEY - valueFrom: - secretKeyRef: - key: deploy-key - name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cloud-connector-tls-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - 
livenessProbe: - httpGet: - path: /healthz - port: 50800 - scheme: HTTPS - name: app - ports: - - containerPort: 50800 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: cloud-conn-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - 
{{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-metadata - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50400" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - - name: PL_MD_ETCD_SERVER - value: 
https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 - - name: PL_ETCD_OPERATOR_ENABLED - value: "true" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 120 - periodSeconds: 10 - name: app - readinessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 30 - periodSeconds: 10 - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - - command: - - sh - - -c - - set -xe; ETCD_PATH="${PL_MD_ETCD_SERVER}"; URL="${ETCD_PATH}${HEALTH_PATH}"; - until [ $(curl --cacert /certs/ca.crt --key /certs/client.key --cert /certs/client.crt - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; - env: - - name: HEALTH_PATH - value: /health - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MD_ETCD_SERVER - value: https://pl-etcd-client.$(PL_POD_NAMESPACE).svc:2379 - image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: etcd-wait - volumeMounts: - - mountPath: /certs - name: certs - serviceAccountName: metadata-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-query-broker - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50300" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels 
-}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_IP_ADDRESS - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_CLOUD_ADDR - valueFrom: - configMapKeyRef: - key: PL_CLOUD_ADDR - name: pl-cloud-config - - name: PL_DATA_ACCESS - value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50300 - scheme: HTTPS - name: app - ports: - - containerPort: 50300 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null 
-w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-metadata-svc - - name: SERVICE_PORT - value: "50400" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: mds-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: query-broker-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - 
operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - configMap: - name: proxy-envoy-config - name: envoy-yaml ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-pem - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - args: [] - env: - - name: PL_PEM_ENV_VAR_PLACEHOLDER - value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. 
- {{- range $key, $value := .Values.customPEMFlags}} - - name: {{$key}} - value: "{{$value}}" - {{- end}} - {{- if .Values.datastreamBufferSpikeSize }} - - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE - value: "{{ .Values.datastreamBufferSpikeSize }}" - {{- end}} - {{- if .Values.datastreamBufferSize }} - - name: PL_DATASTREAM_BUFFER_SIZE - value: "{{ .Values.datastreamBufferSize }}" - {{- end}} - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - - name: PL_CLIENT_TLS_CERT - value: /certs/client.crt - - name: PL_CLIENT_TLS_KEY - value: /certs/client.key - - name: PL_TLS_CA_CERT - value: /certs/ca.crt - - name: PL_DISABLE_SSL - value: "false" - - name: PL_HOST_PATH - value: /host - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_CLOCK_CONVERTER - value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: pem - resources: - limits: - memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} - requests: - memory: {{ if .Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} - securityContext: - capabilities: - add: - - SYS_PTRACE - - SYS_ADMIN - privileged: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /host/lib - name: host-lib - readOnly: true - - mountPath: /host/var - name: host-var - readOnly: true - - 
mountPath: /host/boot - name: host-boot - readOnly: true - - mountPath: /host/etc - name: host-etc - readOnly: true - - mountPath: /sys - name: sys - readOnly: true - - mountPath: /certs - name: certs - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - hostPID: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - volumes: - - hostPath: - path: /lib - type: Directory - name: host-lib - - hostPath: - path: /var - type: Directory - name: host-var - - hostPath: - path: /boot - type: Directory - name: host-boot - - hostPath: - path: /etc - type: Directory - name: host-etc - - hostPath: - path: /sys - type: Directory - name: sys - - name: certs - secret: - secretName: service-tls-certs - updateStrategy: - rollingUpdate: - maxUnavailable: 20 - type: RollingUpdate ---- -apiVersion: batch/v1 -kind: Job -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" 
$element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - backoffLimit: 1 - completions: 1 - parallelism: 1 - template: - metadata: - labels: - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - spec: - containers: - - env: - - name: PL_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: provisioner - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - restartPolicy: Never - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: pl-cert-provisioner-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - -{{- end}} \ No newline at end of file diff --git a/vizier-chart/templates/06_vizier_persistent_ap.yaml b/vizier-chart/templates/06_vizier_persistent_ap.yaml deleted file mode 100644 index 99940e52bd5..00000000000 --- 
a/vizier-chart/templates/06_vizier_persistent_ap.yaml +++ /dev/null @@ -1,2364 +0,0 @@ -{{if and (.Values.autopilot) (not .Values.useEtcdOperator)}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - 
app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 
-}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-ns-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - services - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - secrets - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := 
split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - - secrets - - pods - - services - - persistentvolumes - - persistentvolumeclaims - - serviceaccounts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - - pods/log - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - batch - resources: - - cronjobs - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - coordination.k8s.io - resourceNames: - - cloud-conn-election - - metadata-election - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - px.dev - resources: - - viziers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - px.dev - resources: - - viziers/status - verbs: - - get - - list - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - - rolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - 
{{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-vizier-crd-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - px.dev - resources: - - viziers - - viziers/status - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - coordination.k8s.io - resourceNames: - - metadata-election - resources: - - leases - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - creationTimestamp: null - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} 
- {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - watch - - list ---- 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-role - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resourceNames: - - kube-system - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - nodes - - pods - - services - - endpoints - - namespaces - verbs: - - get - - watch - - list -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get - - watch - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - namespaces - verbs: 
- - watch - - get - - list -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - watch - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cert-provisioner-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cert-provisioner-role -subjects: -- kind: ServiceAccount - name: pl-cert-provisioner-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-cloud-connector-ns-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if 
.Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-updater-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - 
annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-crd-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-metadata-role -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element 
-}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-query-broker-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-query-broker-crd-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pl-vizier-crd-role -subjects: -- kind: ServiceAccount - name: query-broker-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-cloud-connector-role -subjects: -- kind: ServiceAccount - name: cloud-conn-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: default - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" 
$element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-updater-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-updater-cluster-role -subjects: -- kind: ServiceAccount - name: pl-updater-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-vizier-metadata -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- 
end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-vizier-metadata-node-view-cluster-binding - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pl-node-view -subjects: -- kind: ServiceAccount - name: metadata-service-account - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: pl-cloud-connector-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -data: - PL_CLIENT_TLS_CERT: /certs/client.crt - PL_CLIENT_TLS_KEY: /certs/client.key - PL_SERVER_TLS_CERT: /certs/server.crt - PL_SERVER_TLS_KEY: /certs/server.key - PL_TLS_CA_CERT: /certs/ca.crt -kind: ConfigMap -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- 
end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: pl-tls-config - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin-service - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - clusterIP: None - ports: - - name: tcp-http2 - port: 59300 - protocol: TCP - targetPort: 59300 - selector: - app: pl-monitoring - component: vizier - name: kelvin - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50800 - protocol: TCP - targetPort: 50800 - selector: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50400 - protocol: TCP - targetPort: 50400 - selector: - app: pl-monitoring - component: vizier - name: vizier-metadata - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker-svc - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - ports: - - name: tcp-http2 - port: 50300 - protocol: TCP - targetPort: 50300 - - name: tcp-grpc-web - port: 50305 - protocol: TCP - targetPort: 50305 - selector: - app: pl-monitoring - component: vizier - name: vizier-query-broker - type: ClusterIP ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if 
.Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: metadata-pv-claim - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 16Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: kelvin - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: kelvin - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - 
operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_HOST_PATH - value: /host - - name: PL_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-kelvin_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: app - ports: - - containerPort: 59300 - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - - mountPath: /sys - name: sys - readOnly: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry 
}}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 30 - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - hostPath: - path: /sys - type: Directory - name: sys ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if 
.Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: vizier-cloud-connector - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - vizier-bootstrap: "true" - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-cloud-connector - plane: control - vizier-bootstrap: "true" - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - 
secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_DEPLOY_KEY - valueFrom: - secretKeyRef: - key: deploy-key - name: {{ if .Values.customDeployKeySecret }}"{{ .Values.customDeployKeySecret }}"{{else}}"pl-deploy-secrets"{{end}} - optional: true - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cloud-connector-tls-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cloud_connector_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50800 - scheme: HTTPS - name: app - ports: - - containerPort: 50800 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - 
seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: cloud-conn-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-query-broker - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-query-broker - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50300" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: 
vizier-query-broker - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_CLUSTER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - - name: PL_SENTRY_DSN - valueFrom: - secretKeyRef: - key: sentry-dsn - name: pl-cluster-secrets - optional: true - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_IP_ADDRESS - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_CLOUD_ADDR - valueFrom: - configMapKeyRef: - key: PL_CLOUD_ADDR - name: pl-cloud-config - - name: PL_DATA_ACCESS - value: {{ if .Values.dataAccess }}"{{ .Values.dataAccess }}"{{else}}"Full"{{end}} - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-query_broker_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50300 - scheme: HTTPS - name: app - ports: - - containerPort: 50300 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/readyz"; until [ $(curl - -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do echo "waiting - for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-cloud-connector-svc - - name: 
SERVICE_PORT - value: "50800" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: cc-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-metadata-svc - - name: SERVICE_PORT - value: "50400" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: mds-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: query-broker-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - configMap: - name: proxy-envoy-config - name: envoy-yaml ---- 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - replicas: 1 - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-metadata - serviceName: vizier-metadata - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - px.dev/metrics_port: "50400" - px.dev/metrics_scrape: "true" - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-metadata - plane: control - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - env: - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - 
name: PL_MAX_EXPECTED_CLOCK_SKEW - value: "2000" - - name: PL_RENEW_PERIOD - value: {{ if .Values.electionPeriodMs }}"{{ .Values.electionPeriodMs }}"{{else}}"7500"{{end}} - - name: PL_ETCD_OPERATOR_ENABLED - value: "false" - envFrom: - - configMapRef: - name: pl-tls-config - image: '{{ .Values.imageRegistry }}/vizier-metadata_server_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 120 - periodSeconds: 10 - name: app - readinessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: 50400 - scheme: HTTPS - initialDelaySeconds: 30 - periodSeconds: 10 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /certs - name: certs - - mountPath: /metadata - name: metadata-volume - initContainers: - - command: - - sh - - -c - - set -xe; URL="${PROTOCOL}://${SERVICE_NAME}:${SERVICE_PORT}${HEALTH_PATH}"; - until [ $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 - ]; do echo "waiting for ${URL}"; sleep 2; done; - env: - - name: SERVICE_NAME - value: pl-nats-mgmt - - name: SERVICE_PORT - value: "8222" - - name: HEALTH_PATH - value: "" - - name: PROTOCOL - value: http - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: nats-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: metadata-service-account - tolerations: - 
- effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - volumes: - - name: certs - secret: - secretName: service-tls-certs - - name: metadata-volume - persistentVolumeClaim: - claimName: metadata-pv-claim - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - selector: - matchLabels: - app: pl-monitoring - component: vizier - name: vizier-pem - template: - metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - name: vizier-pem - plane: data - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: Exists - - key: kubernetes.io/os - 
operator: In - values: - - linux - - matchExpressions: - - key: beta.kubernetes.io/os - operator: Exists - - key: beta.kubernetes.io/os - operator: In - values: - - linux - containers: - - args: [] - env: - - name: PL_PEM_ENV_VAR_PLACEHOLDER - value: "true" # This is un-used, and is just a placeholder used to templatize our YAMLs for Helm. - {{- range $key, $value := .Values.customPEMFlags}} - - name: {{$key}} - value: "{{$value}}" - {{- end}} - {{- if .Values.datastreamBufferSpikeSize }} - - name: PL_DATASTREAM_BUFFER_SPIKE_SIZE - value: "{{ .Values.datastreamBufferSpikeSize }}" - {{- end}} - {{- if .Values.datastreamBufferSize }} - - name: PL_DATASTREAM_BUFFER_SIZE - value: "{{ .Values.datastreamBufferSize }}" - {{- end}} - - name: TCMALLOC_SAMPLE_PARAMETER - value: "1048576" - - name: PL_CLIENT_TLS_CERT - value: /certs/client.crt - - name: PL_CLIENT_TLS_KEY - value: /certs/client.key - - name: PL_TLS_CA_CERT - value: /certs/ca.crt - - name: PL_DISABLE_SSL - value: "false" - - name: PL_HOST_PATH - value: /host - - name: PL_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: PL_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PL_HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PL_JWT_SIGNING_KEY - valueFrom: - secretKeyRef: - key: jwt-signing-key - name: pl-cluster-secrets - - name: PL_VIZIER_ID - valueFrom: - secretKeyRef: - key: cluster-id - name: pl-cluster-secrets - optional: true - - name: PL_VIZIER_NAME - valueFrom: - secretKeyRef: - key: cluster-name - name: pl-cluster-secrets - optional: true - - name: PL_CLOCK_CONVERTER - value: {{ if .Values.clockConverter }}"{{ .Values.clockConverter }}"{{else}}"default"{{end}} - image: '{{ .Values.imageRegistry }}/vizier-pem_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: pem - resources: - limits: - memory: {{ if .Values.pemMemoryLimit }}"{{ .Values.pemMemoryLimit }}"{{else}}"2Gi"{{end}} - requests: - memory: {{ if 
.Values.pemMemoryRequest }}"{{ .Values.pemMemoryRequest }}"{{else}}"2Gi"{{end}} - securityContext: - capabilities: - add: - - SYS_PTRACE - - SYS_ADMIN - privileged: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /host/lib - name: host-lib - readOnly: true - - mountPath: /host/var - name: host-var - readOnly: true - - mountPath: /host/boot - name: host-boot - readOnly: true - - mountPath: /host/etc - name: host-etc - readOnly: true - - mountPath: /sys - name: sys - readOnly: true - - mountPath: /certs - name: certs - dnsPolicy: ClusterFirstWithHostNet - hostNetwork: true - hostPID: true - initContainers: - - command: - - sh - - -c - - 'set -x; URL="https://${SERVICE_NAME}:${SERVICE_PORT}/healthz"; until [ - $(curl -m 0.5 -s -o /dev/null -w "%{http_code}" -k ${URL}) -eq 200 ]; do - echo "waiting for ${URL}"; sleep 2; done; ' - env: - - name: SERVICE_NAME - value: vizier-query-broker-svc - - name: SERVICE_PORT - value: "50300" - image: '{{ if .Values.registry }}{{ .Values.registry }}/ghcr.io-pixie-io-pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{else}}ghcr.io/pixie-io/pixie-oss-pixie-dev-public-curl:multiarch-7.87.0@sha256:f7f265d5c64eb4463a43a99b6bf773f9e61a50aaa7cefaf564f43e42549a01dd{{end}}' - name: qb-wait - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - securityContext: - seccompProfile: - type: RuntimeDefault - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - volumes: - - hostPath: - path: /lib - type: Directory - name: host-lib - - hostPath: - path: /var - type: Directory - name: host-var - - hostPath: - path: /boot - type: Directory - name: host-boot - - hostPath: - path: /etc - type: Directory - name: host-etc - - hostPath: - path: 
/sys - type: Directory - name: sys - - name: certs - secret: - secretName: service-tls-certs - updateStrategy: - rollingUpdate: - maxUnavailable: 20 - type: RollingUpdate ---- -apiVersion: batch/v1 -kind: Job -metadata: - annotations: - {{if .Values.customAnnotations}}{{range $element := split "," .Values.customAnnotations -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - labels: - {{if .Values.customLabels}}{{range $element := split "," .Values.customLabels -}} - {{ $kv := split "=" $element -}} - {{if eq (len $kv) 2 -}} - {{ $kv._0 }}: "{{ $kv._1 }}" - {{- end}} - {{end}}{{end}} - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - namespace: {{ if .Release.Namespace }}{{ .Release.Namespace }}{{ else }}pl{{ end }} -spec: - backoffLimit: 1 - completions: 1 - parallelism: 1 - template: - metadata: - labels: - app: pl-monitoring - component: vizier - vizier-bootstrap: "true" - name: cert-provisioner-job - spec: - containers: - - env: - - name: PL_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - envFrom: - - configMapRef: - name: pl-cloud-config - - configMapRef: - name: pl-cluster-config - optional: true - image: '{{ .Values.imageRegistry }}/vizier-cert_provisioner_image:{{ .Values.imageTag }}' - imagePullPolicy: Always - name: provisioner - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - restartPolicy: Never - securityContext: - fsGroup: 10100 - runAsGroup: 10100 - runAsNonRoot: true - runAsUser: 10100 - seccompProfile: - type: RuntimeDefault - serviceAccountName: pl-cert-provisioner-service-account - tolerations: - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: amd64 - - effect: NoSchedule - key: kubernetes.io/arch - operator: Equal - value: 
arm64 - - effect: NoExecute - key: kubernetes.io/arch - operator: Equal - value: arm64 - -{{- end}} \ No newline at end of file diff --git a/vizier-chart/values.yaml b/vizier-chart/values.yaml deleted file mode 100644 index e2eee6365bb..00000000000 --- a/vizier-chart/values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -deployKey: $PIXIE_DEPLOY_KEY -clusterName: honeypixie -cloudAddr: getcosmic.ai -devCloudNamespace: plc -namespace: pl -imageTag: 2025-05-09_14-20-42.033_UTC -imageRegistry: mbgurcay From 37cb31b59797e8875234a455065f3cf5f69b2f0e Mon Sep 17 00:00:00 2001 From: Dom Del Nano Date: Wed, 4 Feb 2026 06:55:39 -0800 Subject: [PATCH 86/86] Use correct runner name and don't use remote exec Signed-off-by: Dom Del Nano --- .github/workflows/build_and_test.yaml | 12 ++++++++---- ci/github/bazelrc | 3 --- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_and_test.yaml b/.github/workflows/build_and_test.yaml index 4e29338249a..58d51489c78 100644 --- a/.github/workflows/build_and_test.yaml +++ b/.github/workflows/build_and_test.yaml @@ -36,7 +36,7 @@ jobs: image-base-name: "dev_image_with_extras" ref: ${{ needs.env-protect-setup.outputs.ref }} clang-tidy: - runs-on: oracle-16cpu-64gb-x86-64 + runs-on: oracle-vm-16cpu-64gb-x86-64 needs: [authorize, env-protect-setup, get-dev-image] container: image: ${{ needs.get-dev-image.outputs.image-with-tag }} @@ -64,7 +64,7 @@ jobs: code-coverage: if: github.event_name == 'push' needs: [authorize, env-protect-setup, get-dev-image] - runs-on: oracle-16cpu-64gb-x86-64 + runs-on: oracle-vm-16cpu-64gb-x86-64 container: image: ${{ needs.get-dev-image.outputs.image-with-tag }} steps: @@ -88,7 +88,7 @@ jobs: ./ci/collect_coverage.sh -u -b main -c "$(git rev-parse HEAD)" -r pixie-io/pixie generate-matrix: needs: [authorize, env-protect-setup, get-dev-image] - runs-on: oracle-16cpu-64gb-x86-64 + runs-on: oracle-vm-16cpu-64gb-x86-64 container: image: ${{ needs.get-dev-image.outputs.image-with-tag }} outputs: @@ 
-120,7 +120,7 @@ jobs: bazel_tests_* build-and-test: needs: [authorize, env-protect-setup, get-dev-image, generate-matrix] - runs-on: oracle-16cpu-64gb-x86-64 + runs-on: oracle-vm-16cpu-64gb-x86-64 permissions: contents: read actions: read @@ -160,6 +160,10 @@ jobs: run: | # Github actions container runner creates a docker network without IPv6 support. We enable it manually. sysctl -w net.ipv6.conf.lo.disable_ipv6=0 + + # Our qemu builds require unprivileged user namespaces to run. + sysctl -w kernel.unprivileged_userns_clone=1 + sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 ./scripts/bazel_ignore_codes.sh test ${{ matrix.args }} --target_pattern_file=target_files/${{ matrix.tests }} \ 2> >(tee bazel_stderr) - name: Parse junit reports diff --git a/ci/github/bazelrc b/ci/github/bazelrc index f4b0cdb5ac0..8de37643b0c 100644 --- a/ci/github/bazelrc +++ b/ci/github/bazelrc @@ -5,9 +5,6 @@ common --color=yes # a given run. common --keep_going -# Always use remote exec -build --config=remote - build --build_metadata=HOST=github-actions build --build_metadata=USER=github-actions build --build_metadata=REPO_URL=https://github.com/pixie-io/pixie