diff --git a/deploy/services/helm-charts/dss/templates/yugabyte-loadbalancers.yaml b/deploy/services/helm-charts/dss/templates/yugabyte-loadbalancers.yaml
index 5de93c9e8..e5553b611 100644
--- a/deploy/services/helm-charts/dss/templates/yugabyte-loadbalancers.yaml
+++ b/deploy/services/helm-charts/dss/templates/yugabyte-loadbalancers.yaml
@@ -4,7 +4,95 @@
 # Master nodes Gateways
 {{- range $i, $lb := .Values.loadBalancers.yugabyteMasterNodes }}
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: yb-proxy-config-{{$i}}
+data:
+  haproxy.cfg: |
+    global
+      log stdout format raw local0
+      maxconn 4096
+
+    defaults
+      mode tcp
+      log global
+      # We set high timeouts to avoid disconnects during periods of low activity
+      timeout client 12h
+      timeout server 12h
+      timeout tunnel 12h
+      timeout connect 5s
+      # We enable TCP keep-alives on both the client and server sides
+      option clitcpka
+      option srvtcpka
+      # K8s services may not be ready when HAProxy starts, so we ignore resolution errors
+      default-server init-addr libc,none
+
+    resolvers dns
+      parse-resolv-conf
+      # We limit DNS validity to 5s to react to changes in K8s services
+      hold valid 5s
+
+    frontend master-grpc-f
+      bind :7100
+      default_backend master-grpc-b
+
+    backend master-grpc-b
+      server yb-master-{{$i}} yb-master-{{$i}}.yb-masters.default.svc.cluster.local:7100 check resolvers dns
+
+    frontend tserver-grpc-f
+      bind :9100
+      default_backend tserver-grpc-b
+
+    backend tserver-grpc-b
+      server yb-tserver-{{$i}} yb-tserver-{{$i}}.yb-tservers.default.svc.cluster.local:9100 check resolvers dns
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    name: yugabyte-proxy-{{$i}}
+  name: yugabyte-proxy-{{$i}}
+spec:
+  replicas: 2 # We deploy two instances to provide resilience if one Kubernetes node goes down.
+  selector:
+    matchLabels:
+      app: yugabyte-proxy-{{$i}}
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: yugabyte-proxy-{{$i}}
+      annotations:
+        release: {{$.Release.Name}}/{{$.Release.Revision}}
+    spec:
+      containers:
+        - name: yugabyte-proxy
+          image: "haproxy:3.3"
+          imagePullPolicy: "Always"
+          ports:
+            - containerPort: 7100
+              name: master-grpc
+            - containerPort: 9100
+              name: tserver-grpc
+          volumeMounts:
+            - name: config-volume
+              mountPath: /usr/local/etc/haproxy/
+              readOnly: true
+      volumes:
+        - name: config-volume
+          configMap:
+            name: yb-proxy-config-{{$i}}
+
+---
+
 apiVersion: v1
 kind: Service
 metadata:
@@ -33,8 +121,7 @@ spec:
       targetPort: 9100
   publishNotReadyAddresses: true
   selector:
-    yugabytedUi: "true"
-    apps.kubernetes.io/pod-index: "{{$i}}"
+    app: yugabyte-proxy-{{$i}}
   type: LoadBalancer
 {{- end }}
 {{- end }}
diff --git a/deploy/services/tanka/yugabyte-auxiliary.libsonnet b/deploy/services/tanka/yugabyte-auxiliary.libsonnet
index 53c1e0d55..d7beb0159 100644
--- a/deploy/services/tanka/yugabyte-auxiliary.libsonnet
+++ b/deploy/services/tanka/yugabyte-auxiliary.libsonnet
@@ -100,7 +100,7 @@ local yugabyteLB(metadata, name, ip) =
       --placement_zone=%s
       --use_private_ip=zone
       --node_to_node_encryption_use_client_certificates=true
-      --ysql_hba_conf_csv='hostssl all all 0.0.0.0/0 cert'
+      --ysql_hba_conf_csv=hostssl all all 0.0.0.0/0 cert
     ||| % [
       std.join(",", metadata.yugabyte.masterAddresses),
       metadata.yugabyte.tserver.rpc_bind_addresses,
@@ -202,8 +202,7 @@ local yugabyteLB(metadata, name, ip) =
       },
       spec+: {
         selector: {
-          yugabytedUi: "true",
-          "apps.kubernetes.io/pod-index": '' + i,
+          app: 'yugabyte-proxy-' + i
         },
         publishNotReadyAddresses: true,
         ports: [
@@ -218,6 +217,114 @@ local yugabyteLB(metadata, name, ip) =
         ],
       },
     } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
+  },
+  ProxyConfig: {
+    ["yb-proxy-config-" + i]: base.ConfigMap(metadata, 'yb-proxy-config-' + i) {
+      data: {
+        "haproxy.cfg": |||
+          global
+            log stdout format raw local0
+            maxconn 4096
+
+          defaults
+            mode tcp
+            log global
+            # We set high timeouts to avoid disconnects during periods of low activity
+            timeout client 12h
+            timeout server 12h
+            timeout tunnel 12h
+            timeout connect 5s
+            # We enable TCP keep-alives on both the client and server sides
+            option clitcpka
+            option srvtcpka
+            # K8s services may not be ready when HAProxy starts, so we ignore resolution errors
+            default-server init-addr libc,none
+
+          resolvers dns
+            parse-resolv-conf
+            # We limit DNS validity to 5s to react to changes in K8s services
+            hold valid 5s
+
+          frontend master-grpc-f
+            bind :7100
+            default_backend master-grpc-b
+
+          backend master-grpc-b
+            server yb-master-%s yb-master-%s.yb-masters.%s.svc.cluster.local:7100 check resolvers dns
+
+          frontend tserver-grpc-f
+            bind :9100
+            default_backend tserver-grpc-b
+
+          backend tserver-grpc-b
+            server yb-tserver-%s yb-tserver-%s.yb-tservers.%s.svc.cluster.local:9100 check resolvers dns
+        ||| % [i, i, metadata.namespace, i, i, metadata.namespace]
+      }
+    } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
+  },
+  Proxy: {
+    ["yugabyte-proxy-" + i]: base.Deployment(metadata, 'yugabyte-proxy-' + i) {
+      apiVersion: 'apps/v1',
+      kind: 'Deployment',
+      metadata+: {
+        namespace: metadata.namespace,
+        labels: {
+          name: 'yugabyte-proxy-' + i
+        }
+      },
+      spec+: {
+        replicas: 2, # We deploy two instances to provide resilience if one Kubernetes node goes down.
+        selector: {
+          matchLabels: {
+            app: 'yugabyte-proxy-' + i
+          }
+        },
+        strategy: {
+          rollingUpdate: {
+            maxSurge: "25%",
+            maxUnavailable: "25%",
+          },
+          type: "RollingUpdate",
+        },
+        template+: {
+          metadata+: {
+            labels: {
+              app: 'yugabyte-proxy-' + i
+            }
+          },
+          spec+: {
+            volumes: [
+              {
+                name: "config-volume",
+                configMap: {
+                  name: "yb-proxy-config-" + i,
+                }
+              }
+            ],
+            soloContainer:: base.Container('yugabyte-proxy') {
+              image: "haproxy:3.3",
+              imagePullPolicy: 'Always',
+              ports: [
+                {
+                  containerPort: 7100,
+                  name: 'master-grpc',
+                },
+                {
+                  containerPort: 9100,
+                  name: 'tserver-grpc',
+                },
+              ],
+              volumeMounts: [
+                {
+                  name: "config-volume",
+                  mountPath: "/usr/local/etc/haproxy/",
+                }
+              ],
+            },
+          },
+        },
+      },
+    } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
   },
 } else {}
 }
diff --git a/docs/architecture.md b/docs/architecture.md
index 16d387a45..aefdb786a 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -11,12 +11,73 @@ multiple organizations to each host one DSS instance that is interoperable with
 each other organization's DSS instance. A DSS pool with three participating
 organizations (USSs) will have an architecture similar to the diagram below.
 
-_**Note** that the diagram shows 2 stateful sets per DSS instance. Currently, the
-helm and tanka deployments produce 3 stateful sets per DSS instance. However, after
-Issue #481 is resolved, this is expected to be reduced to 2 stateful sets._
+_**Note** that the diagrams below show 2 stateful sets per DSS instance. Currently, the helm and tanka deployments produce 3 stateful sets per DSS instance. However, after Issue #481 is resolved, this is expected to be reduced to 2 stateful sets._
+
+### Certificates
+
+This diagram shows how certificates are shared. It applies to both CockroachDB and Yugabyte deployments.
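+
+The following sketch illustrates this trust relationship. It is illustrative only: the file paths, peer hostname, and port are placeholders rather than values used by the deployment. Each instance presents a certificate signed by its own USS's private key and verifies peers against the shared `ca.crt`:
+
+```go
+// Illustrative sketch: how one pool member could authenticate a peer
+// using the shared ca.crt bundle and its own USS-signed certificate.
+package main
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"log"
+	"os"
+)
+
+func main() {
+	// The shared CA bundle trusts certificates signed by any pool member's key.
+	caPEM, err := os.ReadFile("ca.crt") // placeholder path
+	if err != nil {
+		log.Fatal(err)
+	}
+	pool := x509.NewCertPool()
+	if !pool.AppendCertsFromPEM(caPEM) {
+		log.Fatal("invalid CA bundle")
+	}
+
+	// This instance's certificate, signed by its own USS's private key.
+	cert, err := tls.LoadX509KeyPair("node.crt", "node.key") // placeholder paths
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Dial a peer datastore; both sides present USS-signed certificates.
+	conn, err := tls.Dial("tcp", "peer.example.com:7100", &tls.Config{ // placeholder peer
+		RootCAs:      pool,
+		Certificates: []tls.Certificate{cert},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+	log.Println("mutual TLS handshake with pool peer succeeded")
+}
+```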
+
+![Pool architecture diagram](assets/generated/pool_architecture_certs.png)
+
+### CockroachDB
 
 ![Pool architecture diagram](assets/generated/pool_architecture.png)
 
+### Yugabyte
+
+Instance-level view:
+![Pool architecture diagram with Yugabyte](assets/generated/pool_architecture_yugabyte_instance.png)
+
+Simplified top-level view, with one replica shown and the Yugabyte services grouped into a single box:
+![Pool architecture diagram with Yugabyte](assets/generated/pool_architecture_yugabyte.png)
+
+To reduce the number of required public load balancers, we use an intermediate reverse proxy to expose the Yugabyte master and tserver ports on a shared public IP per stateful set instance.
+Standard Kubernetes load balancers cannot route connections to different backends based on the destination port out of the box, so the reverse proxy dispatches each incoming connection to the appropriate service according to the port it arrives on.
+
 ### Terminology notes
 
 See [teminology notes](operations/pooling.md#terminology-notes).
diff --git a/docs/assets/generated/pool_architecture.png b/docs/assets/generated/pool_architecture.png
index c550eb671..a6b236482 100644
Binary files a/docs/assets/generated/pool_architecture.png and b/docs/assets/generated/pool_architecture.png differ
diff --git a/docs/assets/generated/pool_architecture_certs.png b/docs/assets/generated/pool_architecture_certs.png
new file mode 100644
index 000000000..56458d058
Binary files /dev/null and b/docs/assets/generated/pool_architecture_certs.png differ
diff --git a/docs/assets/generated/pool_architecture_yugabyte.png b/docs/assets/generated/pool_architecture_yugabyte.png
new file mode 100644
index 000000000..7c1ae6dd7
Binary files /dev/null and b/docs/assets/generated/pool_architecture_yugabyte.png differ
diff --git a/docs/assets/generated/pool_architecture_yugabyte_instance.png b/docs/assets/generated/pool_architecture_yugabyte_instance.png
new file mode 100644
index 000000000..2e8d8bb2f
Binary files /dev/null and b/docs/assets/generated/pool_architecture_yugabyte_instance.png differ
diff --git a/docs/assets/pool_architecture.gv b/docs/assets/pool_architecture.gv
index 467952849..3b2884241 100644
--- a/docs/assets/pool_architecture.gv
+++ b/docs/assets/pool_architecture.gv
@@ -1,99 +1,86 @@
 // To render:
 // dot -Tpng -ogenerated/pool_architecture.png pool_architecture.gv
 digraph G {
-  node [shape=box];
+  node [shape=box, colorscheme=paired8];
+  graph [dpi = 300];
 
   DSSClient [label="DSS client"]
 
-  LoadBalancer1 [label="TLS termination +\nload balancer"]
-  CoreService1a [label="Core Service"];
-  CoreService1b [label="Core Service"];
-  CRDB1a [label="CRDB 1a"];
-  CRDB1b [label="CRDB 1b"];
-  PrivateKey1 [label="Certs signed by\nUSS1 private key"]
+  LoadBalancer1 [label="TLS termination +\nload balancer",color=1]
+  CoreService1a [label="Core Service",color=2];
+  CoreService1b [label="Core Service",color=2];
+  CRDB1a [label="CRDB 1A",color=3];
+  CRDB1b [label="CRDB 1B",color=3];
 
-  LoadBalancer2 [label="TLS termination +\nload balancer"]
-  CoreService2a [label="Core Service"];
-  CoreService2b [label="Core Service"];
-  CRDB2a [label="CRDB 2a"];
-  CRDB2b [label="CRDB 2b"];
-  PrivateKey2 [label="Certs signed by\nUSS2 private key"]
+  LoadBalancer2 [label="TLS termination +\nload balancer",color=1]
+  CoreService2a [label="Core Service",color=2];
+  CoreService2b [label="Core Service",color=2];
+  CRDB2a [label="CRDB 2A",color=3];
+  CRDB2b [label="CRDB 2B",color=3];
 
-  LoadBalancer3 [label="TLS termination +\nload balancer"]
-  CoreService3a [label="Core Service"];
-  CoreService3b [label="Core Service"];
-  CRDB3a [label="CRDB 3a"];
-  CRDB3b [label="CRDB 3b"];
-  PrivateKey3 [label="Certs signed by\nUSS3 private key"]
+  LoadBalancer3 [label="TLS termination +\nload balancer",color=1]
+  CoreService3a [label="Core Service",color=2];
+  CoreService3b [label="Core Service",color=2];
+  CRDB3a [label="CRDB 3A",color=3];
+  CRDB3b [label="CRDB 3B",color=3];
 
-  Certs [label="Shared ca.crt accepts public keys\nof all USSs' private keys"];
-  PublicInternet [label="Public Internet"];
+  { rank=sink; PublicInternet [label="Public Internet"]; }
 
   subgraph cluster_0 {
-    label="USS1's DSS instance"
-    subgraph cluster_1 {
-      label="Stateful set a";
-      CoreService1a -> CRDB1a;
-    }
-    subgraph cluster_2 {
-      label="Stateful set b";
-      CoreService1b -> CRDB1b;
-    }
-    CRDB1a -> PrivateKey1 [dir=back,style=dotted];
-    CRDB1b -> PrivateKey1 [dir=back,style=dotted];
-    CoreService1b -> CRDB1a;
-    CoreService1a -> CRDB1b;
-    LoadBalancer1 -> CoreService1a
-    LoadBalancer1 -> CoreService1b
+    label="USS1's DSS instance"
+    subgraph cluster_1 {
+      label="Replica A";
+      CoreService1a -> CRDB1a;
+    }
+    subgraph cluster_2 {
+      label="Replica B";
+      CoreService1b -> CRDB1b;
+    }
+    CoreService1b -> CRDB1a;
+    CoreService1a -> CRDB1b;
+    LoadBalancer1 -> CoreService1a
+    LoadBalancer1 -> CoreService1b
+    CRDB1a -> CRDB1b [dir=both];
   }
 
   subgraph cluster_4 {
-    label="USS2's DSS instance"
-    subgraph cluster_5 {
-      label="Stateful set a";
-      CoreService2a -> CRDB2a;
-    }
-    subgraph cluster_6 {
-      label="Stateful set b";
-      CoreService2b -> CRDB2b;
-    }
-    CRDB2a -> PrivateKey2 [dir=back,style=dotted];
-    CRDB2b -> PrivateKey2 [dir=back,style=dotted];
-    CoreService2b -> CRDB2a;
-    CoreService2a -> CRDB2b;
-    LoadBalancer2 -> CoreService2a
-    LoadBalancer2 -> CoreService2b
+    label="USS2's DSS instance"
+    subgraph cluster_5 {
+      label="Replica A";
+      CoreService2a -> CRDB2a;
+    }
+    subgraph cluster_6 {
+      label="Replica B";
+      CoreService2b -> CRDB2b;
+    }
+    CoreService2b -> CRDB2a;
+    CoreService2a -> CRDB2b;
+    LoadBalancer2 -> CoreService2a
+    LoadBalancer2 -> CoreService2b
+    CRDB2a -> CRDB2b [dir=both];
   }
 
   subgraph cluster_8 {
-    label="USS3's DSS instance"
-    subgraph cluster_9 {
-      label="Stateful set a";
-      CoreService3a -> CRDB3a;
-    }
-    subgraph cluster_10 {
-      label="Stateful set b";
-      CoreService3b -> CRDB3b;
-    }
-    CRDB3a -> PrivateKey3 [dir=back,style=dotted];
-    CRDB3b -> PrivateKey3 [dir=back,style=dotted];
-    CoreService3b -> CRDB3a;
-    CoreService3a -> CRDB3b;
-    LoadBalancer3 -> CoreService3a
-    LoadBalancer3 -> CoreService3b
+    label="USS3's DSS instance"
+    subgraph cluster_9 {
+      label="Replica A";
+      CoreService3a -> CRDB3a;
+    }
+    subgraph cluster_10 {
+      label="Replica B";
+      CoreService3b -> CRDB3b;
+    }
+    CoreService3b -> CRDB3a;
+    CoreService3a -> CRDB3b;
+    LoadBalancer3 -> CoreService3a
+    LoadBalancer3 -> CoreService3b
+    CRDB3a -> CRDB3b [dir=both];
   }
 
   DSSClient -> LoadBalancer1;
   DSSClient -> LoadBalancer2;
   DSSClient -> LoadBalancer3;
 
-  CRDB1a -> Certs [dir=back,style=dotted];
-  CRDB1b -> Certs [dir=back,style=dotted];
-  CRDB2a -> Certs [dir=back,style=dotted];
-  CRDB2b -> Certs [dir=back,style=dotted];
-  CRDB3a -> Certs [dir=back,style=dotted];
-  CRDB3b -> Certs [dir=back,style=dotted];
-
   CRDB1a -> PublicInternet [dir=both];
   CRDB1b -> PublicInternet [dir=both];
   CRDB2a -> PublicInternet [dir=both];
@@ -101,8 +88,4 @@ digraph G {
   CRDB3a -> PublicInternet [dir=both];
   CRDB3b -> PublicInternet [dir=both];
 
-  PrivateKey1 -> Certs [dir=none,color=transparent];
-  PrivateKey2 -> Certs [dir=none,color=transparent];
-  PrivateKey3 -> Certs [dir=none,color=transparent];
-
-  Certs -> PublicInternet [dir=none,color=transparent];
 }
diff --git a/docs/assets/pool_architecture_certs.gv b/docs/assets/pool_architecture_certs.gv
new file mode 100644
index 000000000..076387ff4
--- /dev/null
+++ b/docs/assets/pool_architecture_certs.gv
@@ -0,0 +1,84 @@
+// To render:
+// dot -Tpng -ogenerated/pool_architecture_certs.png pool_architecture_certs.gv
+digraph G {
+  node [shape=box, colorscheme=paired8];
+  graph [dpi = 300];
+
+  CoreService1a [label="Core Service",color=2];
+  CoreService1b [label="Core Service",color=2];
+  DB1a [label="Datastore 1A",color=3];
+  DB1b [label="Datastore 1B",color=3];
+  PrivateKey1 [label="Certs signed by\nUSS1 private key",color=6]
+
+  CoreService2a [label="Core Service",color=2];
+  CoreService2b [label="Core Service",color=2];
+  DB2a [label="Datastore 2A",color=3];
+  DB2b [label="Datastore 2B",color=3];
+  PrivateKey2 [label="Certs signed by\nUSS2 private key",color=6]
+
+  CoreService3a [label="Core Service",color=2];
+  CoreService3b [label="Core Service",color=2];
+  DB3a [label="Datastore 3A",color=3];
+  DB3b [label="Datastore 3B",color=3];
+  PrivateKey3 [label="Certs signed by\nUSS3 private key",color=6]
+
+  Certs [label="Shared ca.crt accepts public keys\nof all USSs' private keys",color=6];
+
+  subgraph cluster_0 {
+    label="USS1's DSS instance"
+    subgraph cluster_1 {
+      label="Replica A";
+      CoreService1a;
+      { rank=sink; DB1a; }
+    }
+    subgraph cluster_2 {
+      label="Replica B";
+      CoreService1b;
+      { rank=sink; DB1b; }
+    }
+    DB1a -> PrivateKey1 [dir=back,style=dotted];
+    DB1b -> PrivateKey1 [dir=back,style=dotted];
+    CoreService1a -> PrivateKey1 [dir=back,style=dotted];
+    CoreService1b -> PrivateKey1 [dir=back,style=dotted];
+  }
+
+  subgraph cluster_4 {
+    label="USS2's DSS instance"
+    subgraph cluster_5 {
+      label="Replica A";
+      CoreService2a;
+      { rank=sink; DB2a; }
+    }
+    subgraph cluster_6 {
+      label="Replica B";
+      CoreService2b;
+      { rank=sink; DB2b; }
+    }
+    DB2a -> PrivateKey2 [dir=back,style=dotted];
+    DB2b -> PrivateKey2 [dir=back,style=dotted];
+    CoreService2a -> PrivateKey2 [dir=back,style=dotted];
+    CoreService2b -> PrivateKey2 [dir=back,style=dotted];
+  }
+
+  subgraph cluster_8 {
+    label="USS3's DSS instance"
+    subgraph cluster_9 {
+      label="Replica A";
+      CoreService3a;
+      { rank=sink; DB3a; }
+    }
+    subgraph cluster_10 {
+      label="Replica B";
+      CoreService3b;
+      { rank=sink; DB3b; }
+    }
+    DB3a -> PrivateKey3 [dir=back,style=dotted];
+    DB3b -> PrivateKey3 [dir=back,style=dotted];
+    CoreService3a -> PrivateKey3 [dir=back,style=dotted];
+    CoreService3b -> PrivateKey3 [dir=back,style=dotted];
+  }
+
+  PrivateKey1 -> Certs [dir=both,style=dotted];
+  PrivateKey2 -> Certs [dir=both,style=dotted];
+  PrivateKey3 -> Certs [dir=both,style=dotted];
+}
diff --git a/docs/assets/pool_architecture_yugabyte.gv b/docs/assets/pool_architecture_yugabyte.gv
new file mode 100644
index 000000000..122b5586f
--- /dev/null
+++ b/docs/assets/pool_architecture_yugabyte.gv
@@ -0,0 +1,74 @@
+// To render:
+// dot -Tpng -ogenerated/pool_architecture_yugabyte.png pool_architecture_yugabyte.gv
+digraph G {
+  node [shape=box, colorscheme=paired8];
+  graph [dpi = 300];
+
+  DSSClient [label="DSS client"]
+
+  LoadBalancer1 [label="TLS termination +\nload balancer",color=1]
+  CoreService1 [label="Core Service",color=2];
+  YBDB1 [label="YBDB Services",color=3];
+  YBLoadBalancer1 [label="TCP Reverse Proxies",color=5]
+
+  LoadBalancer2 [label="TLS termination +\nload balancer",color=1]
+  CoreService2 [label="Core Services",color=2];
+  YBDB2 [label="YBDB Services",color=3];
+  YBLoadBalancer2 [label="TCP Reverse Proxies",color=5]
+
+  LoadBalancer3 [label="TLS termination +\nload balancer",color=1]
+  CoreService3 [label="Core Services",color=2];
+  YBDB3 [label="YBDB Services",color=3];
+  YBLoadBalancer3 [label="TCP Reverse Proxies",color=5]
+
+  PublicInternet [label="Public Internet"];
+
+  subgraph cluster_0 {
+    label="USS1's DSS instance"
+    subgraph cluster_1 {
+      label="Replica X";
+      CoreService1 -> YBDB1;
+      YBDB1;
+      YBLoadBalancer1 -> YBDB1;
+      { rank=sink; YBLoadBalancer1; }
+    }
+    LoadBalancer1 -> CoreService1
+  }
+
+  subgraph cluster_4 {
+    label="USS2's DSS instance"
+    subgraph cluster_5 {
+      label="Replica X";
+      CoreService2 -> YBDB2;
+      YBDB2;
+      YBLoadBalancer2 -> YBDB2;
+      { rank=sink; YBLoadBalancer2; }
+    }
+    LoadBalancer2 -> CoreService2
+  }
+
+  subgraph cluster_8 {
+    label="USS3's DSS instance"
+    subgraph cluster_9 {
+      label="Replica X";
+      YBDB3;
+      CoreService3 -> YBDB3;
+      YBLoadBalancer3 -> YBDB3;
+      { rank=sink; YBLoadBalancer3; }
+    }
+    LoadBalancer3 -> CoreService3
+  }
+
+  DSSClient -> LoadBalancer1;
+  DSSClient -> LoadBalancer2;
+  DSSClient -> LoadBalancer3;
+
+  YBDB1 -> PublicInternet;
+  YBDB2 -> PublicInternet;
+  YBDB3 -> PublicInternet;
+
+  YBLoadBalancer1 -> PublicInternet [dir=back];
+  YBLoadBalancer2 -> PublicInternet [dir=back];
+  YBLoadBalancer3 -> PublicInternet [dir=back];
+
+}
diff --git a/docs/assets/pool_architecture_yugabyte_instance.gv b/docs/assets/pool_architecture_yugabyte_instance.gv
new file mode 100644
index 000000000..e22ed7a29
--- /dev/null
+++ b/docs/assets/pool_architecture_yugabyte_instance.gv
@@ -0,0 +1,52 @@
+// To render:
+// dot -Tpng -ogenerated/pool_architecture_yugabyte_instance.png pool_architecture_yugabyte_instance.gv
+digraph G {
+  node [shape=box, colorscheme=paired8];
+  graph [dpi = 300];
+
+  DSSClient [label="DSS client"]
+
+  LoadBalancer1 [label="TLS termination +\nload balancer",color=1]
+  CoreService1a [label="Core Service",color=2];
+  CoreService1b [label="Core Service",color=2];
+  YBDBM1a [label="YBDB Master 1A",color=3];
+  YBDBM1b [label="YBDB Master 1B",color=3];
+  YBDBT1a [label="YBDB TServer 1A",color=4];
+  YBDBT1b [label="YBDB TServer 1B",color=4];
+  YBLoadBalancer1a [label="TCP Reverse Proxies A",color=5]
+  YBLoadBalancer1b [label="TCP Reverse Proxies B",color=5]
+
+  subgraph cluster_0 {
+    label="USS1's DSS instance"
+    subgraph cluster_1 {
+      label="Replica A";
+      CoreService1a -> YBDBT1a;
+      YBDBM1a;
+      YBLoadBalancer1a -> YBDBM1a;
+      YBLoadBalancer1a -> YBDBT1a;
+      YBDBM1a -> YBDBT1a [dir=both];
+      { rank=sink; YBLoadBalancer1a; }
+    }
+    subgraph cluster_2 {
+      label="Replica B";
+      CoreService1b -> YBDBT1b;
+      YBDBM1b;
+      YBLoadBalancer1b -> YBDBM1b;
+      YBLoadBalancer1b -> YBDBT1b;
+      YBDBM1b -> YBDBT1b [dir=both];
+      { rank=sink; YBLoadBalancer1b; }
+    }
+    CoreService1b -> YBDBT1a;
+    CoreService1a -> YBDBT1b;
+    LoadBalancer1 -> CoreService1a
+    LoadBalancer1 -> CoreService1b
+
+    YBDBM1a -> YBDBT1b [dir=both];
+    YBDBM1b -> YBDBT1a [dir=both];
+    YBDBM1a -> YBDBM1b [dir=both];
+    YBDBT1a -> YBDBT1b [dir=both];
+  }
+
+  DSSClient -> LoadBalancer1;
+
+}
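
For a quick manual check of the port-based dispatch configured above, the following sketch attempts a TCP connection to both forwarded ports on one proxy. The address is a hypothetical placeholder for the LoadBalancer IP of a yugabyte-proxy instance; 7100 and 9100 are the master and tserver gRPC ports from the HAProxy config.

```go
// Sketch: confirm that a shared proxy IP accepts TCP connections on both
// the YugabyteDB master (7100) and tserver (9100) ports.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	proxy := "203.0.113.10" // hypothetical LoadBalancer IP of one yugabyte-proxy-{i} Service
	for _, port := range []string{"7100", "9100"} {
		conn, err := net.DialTimeout("tcp", net.JoinHostPort(proxy, port), 5*time.Second)
		if err != nil {
			fmt.Printf("port %s: unreachable: %v\n", port, err)
			continue
		}
		conn.Close()
		fmt.Printf("port %s: reachable\n", port)
	}
}
```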