@@ -4,7 +4,91 @@

# Master nodes Gateways
{{- range $i, $lb := .Values.loadBalancers.yugabyteMasterNodes }}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: yb-proxy-config-{{$i}}
data:
  haproxy.cfg: |
    global
      log stdout format raw local0
      maxconn 4096

    defaults
      mode tcp
      log global
      timeout client 12h
      timeout server 12h
      timeout tunnel 12h
      timeout connect 5s
      option clitcpka
      option srvtcpka
      default-server init-addr libc,none

    resolvers dns
      parse-resolv-conf
      hold valid 5s

    frontend master-grpc-f
      bind :7100
      default_backend master-grpc-b

    backend master-grpc-b
      server yb-master-{{$i}} yb-master-{{$i}}.yb-masters.default.svc.cluster.local:7100 check resolvers dns

    frontend tserver-grpc-f
      bind :9100
      default_backend tserver-grpc-b

    backend tserver-grpc-b
      server yb-tserver-{{$i}} yb-tserver-{{$i}}.yb-tservers.default.svc.cluster.local:9100 check resolvers dns

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    name: yugabyte-proxy-{{$i}}
  name: yugabyte-proxy-{{$i}}
spec:
  replicas: 2 # We deploy two instances to provide resilience if one node goes down.
  selector:
    matchLabels:
      app: yugabyte-proxy-{{$i}}
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: yugabyte-proxy-{{$i}}
      annotations:
        release: {{$.Release.Name}}/{{$.Release.Revision}}
    spec:
      containers:
        - name: yugabyte-proxy
          image: "haproxy:3.3"
          imagePullPolicy: "Always"
          ports:
            - containerPort: 7100
              name: master-grpc
            - containerPort: 9100
              name: tserver-grpc
          volumeMounts:
            - name: config-volume
              mountPath: /usr/local/etc/haproxy/
              readOnly: true
      volumes:
        - name: config-volume
          configMap:
            name: yb-proxy-config-{{$i}}

---

apiVersion: v1
kind: Service
metadata:
@@ -33,8 +117,7 @@ spec:
      targetPort: 9100
  publishNotReadyAddresses: true
  selector:
    yugabytedUi: "true"
    apps.kubernetes.io/pod-index: "{{$i}}"
    app: yugabyte-proxy-{{$i}}
  type: LoadBalancer
{{- end }}
{{- end }}
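
For orientation, here is a minimal sketch of what the ConfigMap part of this template renders to for the first node (`$i = 0`). The `haproxy.cfg` is abridged to the backend lines, and the `default` namespace simply reflects the FQDNs hard-coded in the template above; this is an illustration, not chart output:

```yaml
# Hypothetical rendered output for node index 0 (abridged sketch, not generated by the chart)
apiVersion: v1
kind: ConfigMap
metadata:
  name: yb-proxy-config-0
data:
  haproxy.cfg: |
    # ... global, defaults and resolvers sections as in the template above ...
    backend master-grpc-b
      server yb-master-0 yb-master-0.yb-masters.default.svc.cluster.local:7100 check resolvers dns

    backend tserver-grpc-b
      server yb-tserver-0 yb-tserver-0.yb-tservers.default.svc.cluster.local:9100 check resolvers dns
```

Each proxy deployment thus fronts a single yb-master/yb-tserver pod through its stable StatefulSet DNS name, which is what lets the per-node LoadBalancer Service below keep targeting one node via the new `app: yugabyte-proxy-{{$i}}` selector.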
109 changes: 106 additions & 3 deletions deploy/services/tanka/yugabyte-auxiliary.libsonnet
@@ -100,7 +100,7 @@ local yugabyteLB(metadata, name, ip) =
--placement_zone=%s
--use_private_ip=zone
--node_to_node_encryption_use_client_certificates=true
--ysql_hba_conf_csv='hostssl all all 0.0.0.0/0 cert'
--ysql_hba_conf_csv=hostssl all all 0.0.0.0/0 cert
||| % [
std.join(",", metadata.yugabyte.masterAddresses),
metadata.yugabyte.tserver.rpc_bind_addresses,
Expand Down Expand Up @@ -202,8 +202,7 @@ local yugabyteLB(metadata, name, ip) =
        },
        spec+: {
          selector: {
            yugabytedUi: "true",
            "apps.kubernetes.io/pod-index": '' + i,
            app: 'yugabyte-proxy-' + i
          },
          publishNotReadyAddresses: true,
          ports: [
@@ -218,6 +217,110 @@ local yugabyteLB(metadata, name, ip) =
          ],
        },
      } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
    },
    ProxyConfig: {
      ["yb-proxy-config-" + i]: base.ConfigMap(metadata, 'yb-proxy-config-' + i) {
        data: {
          "haproxy.cfg": |||
            global
              log stdout format raw local0
              maxconn 4096

            defaults
              mode tcp
              log global
              timeout client 12h
              timeout server 12h
              timeout tunnel 12h
              timeout connect 5s
              option clitcpka
              option srvtcpka
              default-server init-addr libc,none

            resolvers dns
              parse-resolv-conf
              hold valid 5s

            frontend master-grpc-f
              bind :7100
              default_backend master-grpc-b

            backend master-grpc-b
              server yb-master-%s yb-master-%s.yb-masters.%s.svc.cluster.local:7100 check resolvers dns

            frontend tserver-grpc-f
              bind :9100
              default_backend tserver-grpc-b

            backend tserver-grpc-b
              server yb-tserver-%s yb-tserver-%s.yb-tservers.%s.svc.cluster.local:9100 check resolvers dns
          ||| % [i, i, metadata.namespace, i, i, metadata.namespace]
        }
      } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
    },
    Proxy: {
      ["yugabyte-proxy-" + i]: base.Deployment(metadata, 'yugabyte-proxy-' + i) {
        apiVersion: 'apps/v1',
        kind: 'Deployment',
        metadata+: {
          namespace: metadata.namespace,
          labels: {
            name: 'yugabyte-proxy-' + i
          }
        },
        spec+: {
          replicas: 2, # We deploy two instances to provide resilience if one node goes down.
          selector: {
            matchLabels: {
              app: 'yugabyte-proxy-' + i
            }
          },
          strategy: {
            rollingUpdate: {
              maxSurge: "25%",
              maxUnavailable: "25%",
            },
            type: "RollingUpdate",
          },
          template+: {
            metadata+: {
              labels: {
                app: 'yugabyte-proxy-' + i
              }
            },
            spec+: {
              volumes: [
                {
                  name: "config-volume",
                  configMap: {
                    name: "yb-proxy-config-" + i,
                  }
                }
              ],
              soloContainer:: base.Container('yugabyte-proxy') {
                image: "haproxy:3.3",
                imagePullPolicy: 'Always',
                ports: [
                  {
                    containerPort: 7100,
                    name: 'master-grpc',
                  },
                  {
                    containerPort: 9100,
                    name: 'tserver-grpc',
                  },
                ],
                volumeMounts: [
                  {
                    name: "config-volume",
                    mountPath: "/usr/local/etc/haproxy/",
                  }
                ],
              },
            },
          },
        },
      } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
    },
  } else {}
}
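
As an aside, the `|||` text block combined with `%` formatting is what stamps out one HAProxy config per node index. A standalone sketch of that substitution, runnable with the `jsonnet` CLI; the namespace value is a made-up placeholder and `std.range(0, 2)` stands in for the real `tserverNodeIPs`-derived range:

```jsonnet
// Illustration only: mirrors the ProxyConfig comprehension above with
// assumed values (namespace "dss-demo", three nodes), not library code.
local namespace = 'dss-demo';

local backendCfg(i) = |||
  backend master-grpc-b
    server yb-master-%s yb-master-%s.yb-masters.%s.svc.cluster.local:7100 check resolvers dns
||| % [i, i, namespace];

{
  ['yb-proxy-config-' + i]: { data: { 'haproxy.cfg': backendCfg(i) } }
  for i in std.range(0, 2)
}
```

Evaluating this with `jsonnet` yields one `yb-proxy-config-<i>` entry per index with the node-specific FQDN baked in, matching what the library emits for each tserver node.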
6 changes: 6 additions & 0 deletions docs/architecture.md
@@ -15,8 +15,14 @@
_**Note** that the diagram shows 2 stateful sets per DSS instance. Currently, the
helm and tanka deployments produce 3 stateful sets per DSS instance. However, after
Issue #481 is resolved, this is expected to be reduced to 2 stateful sets._

### CockroachDB

![Pool architecture diagram](assets/generated/pool_architecture.png)

### Yugabyte

![Pool architecture diagram with Yugabyte](assets/generated/pool_architecture_yugabyte.png)

**Contributor (review comment):** Add a description and the reason for introducing the reverse proxy here please as a note.

### Terminology notes

See [terminology notes](operations/pooling.md#terminology-notes).
Binary file modified docs/assets/generated/pool_architecture.png
130 changes: 65 additions & 65 deletions docs/assets/pool_architecture.gv
@@ -1,86 +1,86 @@
// To render:
// dot -Tpng -ogenerated/pool_architecture.png pool_architecture.gv
digraph G {
node [shape=box];
node [shape=box, colorscheme=paired8];

DSSClient [label="DSS client"]

LoadBalancer1 [label="TLS termination +\nload balancer"]
CoreService1a [label="Core Service"];
CoreService1b [label="Core Service"];
CRDB1a [label="CRDB 1a"];
CRDB1b [label="CRDB 1b"];
PrivateKey1 [label="Certs signed by\nUSS1 private key"]
LoadBalancer1 [label="TLS termination +\nload balancer",color=1]
CoreService1a [label="Core Service",color=2];
CoreService1b [label="Core Service",color=2];
CRDB1a [label="CRDB 1A",color=3];
CRDB1b [label="CRDB 1B",color=3];
PrivateKey1 [label="Certs signed by\nUSS1 private key",color=6]

LoadBalancer2 [label="TLS termination +\nload balancer"]
CoreService2a [label="Core Service"];
CoreService2b [label="Core Service"];
CRDB2a [label="CRDB 2a"];
CRDB2b [label="CRDB 2b"];
PrivateKey2 [label="Certs signed by\nUSS2 private key"]
LoadBalancer2 [label="TLS termination +\nload balancer",color=1]
CoreService2a [label="Core Service",color=2];
CoreService2b [label="Core Service",color=2];
CRDB2a [label="CRDB 2A",color=3];
CRDB2b [label="CRDB 2B",color=3];
PrivateKey2 [label="Certs signed by\nUSS2 private key",color=6]

LoadBalancer3 [label="TLS termination +\nload balancer"]
CoreService3a [label="Core Service"];
CoreService3b [label="Core Service"];
CRDB3a [label="CRDB 3a"];
CRDB3b [label="CRDB 3b"];
PrivateKey3 [label="Certs signed by\nUSS3 private key"]
LoadBalancer3 [label="TLS termination +\nload balancer",color=1]
CoreService3a [label="Core Service",color=2];
CoreService3b [label="Core Service",color=2];
CRDB3a [label="CRDB 3A",color=3];
CRDB3b [label="CRDB 3B",color=3];
PrivateKey3 [label="Certs signed by\nUSS3 private key",color=6]

Certs [label="Shared ca.crt accepts public keys\nof all USSs' private keys"];
Certs [label="Shared ca.crt accepts public keys\nof all USSs' private keys",color=6];
PublicInternet [label="Public Internet"];

subgraph cluster_0 {
label="USS1's DSS instance"
subgraph cluster_1 {
label="Stateful set a";
CoreService1a -> CRDB1a;
}
subgraph cluster_2 {
label="Stateful set b";
CoreService1b -> CRDB1b;
}
CRDB1a -> PrivateKey1 [dir=back,style=dotted];
CRDB1b -> PrivateKey1 [dir=back,style=dotted];
CoreService1b -> CRDB1a;
CoreService1a -> CRDB1b;
LoadBalancer1 -> CoreService1a
LoadBalancer1 -> CoreService1b
label="USS1's DSS instance"
subgraph cluster_1 {
label="Stateful set A";
CoreService1a -> CRDB1a;
}
subgraph cluster_2 {
label="Stateful set B";
CoreService1b -> CRDB1b;
}
CRDB1a -> PrivateKey1 [dir=back,style=dotted];
CRDB1b -> PrivateKey1 [dir=back,style=dotted];
CoreService1b -> CRDB1a;
CoreService1a -> CRDB1b;
LoadBalancer1 -> CoreService1a
LoadBalancer1 -> CoreService1b
}

subgraph cluster_4 {
label="USS2's DSS instance"
subgraph cluster_5 {
label="Stateful set a";
CoreService2a -> CRDB2a;
}
subgraph cluster_6 {
label="Stateful set b";
CoreService2b -> CRDB2b;
}
CRDB2a -> PrivateKey2 [dir=back,style=dotted];
CRDB2b -> PrivateKey2 [dir=back,style=dotted];
CoreService2b -> CRDB2a;
CoreService2a -> CRDB2b;
LoadBalancer2 -> CoreService2a
LoadBalancer2 -> CoreService2b
label="USS2's DSS instance"
subgraph cluster_5 {
label="Stateful set A";
CoreService2a -> CRDB2a;
}
subgraph cluster_6 {
label="Stateful set B";
CoreService2b -> CRDB2b;
}
CRDB2a -> PrivateKey2 [dir=back,style=dotted];
CRDB2b -> PrivateKey2 [dir=back,style=dotted];
CoreService2b -> CRDB2a;
CoreService2a -> CRDB2b;
LoadBalancer2 -> CoreService2a
LoadBalancer2 -> CoreService2b
}

subgraph cluster_8 {
label="USS3's DSS instance"
subgraph cluster_9 {
label="Stateful set a";
CoreService3a -> CRDB3a;
}
subgraph cluster_10 {
label="Stateful set b";
CoreService3b -> CRDB3b;
}
CRDB3a -> PrivateKey3 [dir=back,style=dotted];
CRDB3b -> PrivateKey3 [dir=back,style=dotted];
CoreService3b -> CRDB3a;
CoreService3a -> CRDB3b;
LoadBalancer3 -> CoreService3a
LoadBalancer3 -> CoreService3b
label="USS3's DSS instance"
subgraph cluster_9 {
label="Stateful set A";
CoreService3a -> CRDB3a;
}
subgraph cluster_10 {
label="Stateful set B";
CoreService3b -> CRDB3b;
}
CRDB3a -> PrivateKey3 [dir=back,style=dotted];
CRDB3b -> PrivateKey3 [dir=back,style=dotted];
CoreService3b -> CRDB3a;
CoreService3a -> CRDB3b;
LoadBalancer3 -> CoreService3a
LoadBalancer3 -> CoreService3b
}

DSSClient -> LoadBalancer1;