
Commit e557169

Merge pull request #55 from RobotSail/RobotSail/issue35
calculate bloom filter size + refactor
2 parents 9a99bd0 + 40b0ccf commit e557169

File tree

10 files changed: +260, -89 lines changed


Makefile

Lines changed: 34 additions & 29 deletions
@@ -130,8 +130,9 @@ vet: ## Run go vet against code.
 
 .PHONY: test
 GINKGO_ARGS ?= --progress --fail-on-pending --keep-going --cover --coverprofile=cover.profile --race --trace --json-report=report.json --timeout=3m
+GINKGO_TARGETS ?= ./...
 test: lint manifests generate fmt vet lint envtest ginkgo ## Run tests.
-	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" $(GINKGO) run $(GINKGO_ARGS) ./...
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" $(GINKGO) run $(GINKGO_ARGS) $(GINKGO_TARGETS)
 
 .PHONY: test-e2e
 test-e2e: kuttl ## Run e2e tests. Requires cluster w/ Scribe already installed
@@ -211,11 +212,38 @@ TMP_DIR=$$(mktemp -d) ;\
 cd $$TMP_DIR ;\
 go mod init tmp ;\
 echo "Downloading $(2)" ;\
-GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\
+GOBIN=$(LOCALBIN) go get $(2) ;\
 rm -rf $$TMP_DIR ;\
 }
 endef
 
+# go-install-tool will 'go install' any package $2 and install it to $1.
+define go-install-tool
+@[ -f $(1) ] || { \
+set -e ;\
+TMP_DIR=$$(mktemp -d) ;\
+cd $$TMP_DIR ;\
+go mod init tmp ;\
+echo "Downloading $(2)" ;\
+GOBIN=$(LOCALBIN) go install $(2) ;\
+rm -rf $$TMP_DIR ;\
+}
+endef
+
+# go-install-mod-tool will 'go install' any package $2 and install it to $1.
+define go-install-mod-tool
+@[ -f $(1) ] || { \
+set -e ;\
+TMP_DIR=$$(mktemp -d) ;\
+cd $$TMP_DIR ;\
+go mod init tmp ;\
+echo "Downloading $(2)" ;\
+GOBIN=$(LOCALBIN) go install -mod=mod $(2) ;\
+rm -rf $$TMP_DIR ;\
+}
+endef
+
+
 .PHONY: bundle
 bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
 	operator-sdk generate kustomize manifests -q
@@ -273,7 +301,7 @@ catalog-push: ## Push a catalog image.
 	$(MAKE) docker-push IMG=$(CATALOG_IMG)
 
 
-##@ Download Utilities
+##@ Download tools
 
 # download-tool will curl any file $2 and install it to $1.
 define download-tool
@@ -282,32 +310,9 @@ set -e ;\
 echo "📥 Downloading $(2)" ;\
 curl -sSLo "$(1)" "$(2)" ;\
 chmod a+x "$(1)" ;\
-echo "✅ Done" ;\
-}
-endef
-
-# install-go-tool will download any $2 URL and install to $1
-define install-go-tool
-@[ -f $(1) ] || { \
-set -e ;\
-echo "📥 Downloading $(2)" ;\
-GOBIN=$(1) go install $(2) ;\
-echo "✅ Done" ;\
 }
 endef
 
-# install-go-tool will download any $2 URL and install to $1
-define install-go-tool-mod
-@[ -f $(1) ] || { \
-set -e ;\
-echo "📥 Downloading $(2)" ;\
-GOBIN=$(1) go install -mod=mod $(2) ;\
-echo "✅ Done" ;\
-}
-endef
-
-
-
 .PHONY: kuttl
 KUTTL := $(LOCALBIN)/kuttl
 KUTTL_URL := https://github.com/kudobuilder/kuttl/releases/download/v$(KUTTL_VERSION)/kubectl-kuttl_$(KUTTL_VERSION)_linux_x86_64
@@ -319,15 +324,15 @@ GINKGO := $(LOCALBIN)/ginkgo
 GINKGO_URL := github.com/onsi/ginkgo/v2/ginkgo
 ginkgo: $(GINKGO) ## Install ginkgo
 $(GINKGO): $(LOCALBIN)
-	$(call install-go-tool-mod,$(LOCALBIN),$(GINKGO_URL))
+	$(call go-install-mod-tool,$(LOCALBIN),$(GINKGO_URL))
 
 
 .PHONY: kustomize
 KUSTOMIZE = $(LOCALBIN)/kustomize
 KUSTOMIZE_URL := sigs.k8s.io/kustomize/kustomize/$(KUSTOMIZE_MAJOR)@$(KUSTOMIZE_VERSION)
 kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
 $(KUSTOMIZE): $(LOCALBIN)
-	$(call install-go-tool,$(LOCALBIN),$(KUSTOMIZE_URL))
+	$(call go-install-tool,$(LOCALBIN),$(KUSTOMIZE_URL))
 
 .PHONY: helm
 HELM := $(LOCALBIN)/helm
@@ -344,4 +349,4 @@ golangci-lint: $(GOLANGCILINT) ## Download golangci-lint
 $(GOLANGCILINT): $(LOCALBIN)
 	@ echo "📥 Downloading helm"
 	curl -sSfL $(GOLANGCI_URL) | sh -s -- -b $(LOCALBIN) $(GOLANGCI_VERSION)
-	@ echo "✅ Done"
+	@ echo "✅ Done"

api/v1alpha1/ipfs_types.go

Lines changed: 8 additions & 7 deletions
@@ -17,6 +17,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -42,13 +43,13 @@ type networkConfig struct {
 
 type IpfsSpec struct {
 	// +kubebuilder:validation:Optional
-	URL            string         `json:"url"`
-	Public         bool           `json:"public"`
-	IpfsStorage    string         `json:"ipfsStorage"`
-	ClusterStorage string         `json:"clusterStorage"`
-	Replicas       int32          `json:"replicas"`
-	Networking     networkConfig  `json:"networking"`
-	Follows        []followParams `json:"follows"`
+	URL            string            `json:"url"`
+	Public         bool              `json:"public"`
+	IpfsStorage    resource.Quantity `json:"ipfsStorage"`
+	ClusterStorage string            `json:"clusterStorage"`
+	Replicas       int32             `json:"replicas"`
+	Networking     networkConfig     `json:"networking"`
+	Follows        []followParams    `json:"follows"`
 }
 
 type IpfsStatus struct {
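
Changing IpfsStorage from a plain string to resource.Quantity means values such as "8Gi" or "2Ti" are parsed and validated by the Kubernetes API machinery before the controller ever sees them. A minimal, illustrative sketch of how the new field type behaves (not part of this commit):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A value a user might set for spec.ipfsStorage in the Ipfs CR.
	q := resource.MustParse("8Gi")
	fmt.Println(q.String()) // "8Gi" — the quantity round-trips losslessly
	fmt.Println(q.Value())  // 8589934592 — the byte count the controller works with
}
```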

controllers/scripts.go

Lines changed: 97 additions & 50 deletions
@@ -2,18 +2,19 @@ package controllers
 
 import (
 	"context"
-	"strconv"
+	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 
+	"github.com/alecthomas/units"
 	"github.com/ipfs/kubo/config"
 	"github.com/libp2p/go-libp2p-core/peer"
 	ma "github.com/multiformats/go-multiaddr"
 	clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1"
 	"github.com/redhat-et/ipfs-operator/controllers/scripts"
+	"github.com/redhat-et/ipfs-operator/controllers/utils"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
@@ -35,68 +36,57 @@ func (r *IpfsReconciler) ConfigMapScripts(
 	m *clusterv1alpha1.Ipfs,
 	cm *corev1.ConfigMap,
 ) (controllerutil.MutateFn, string) {
+	var err error
 	log := ctrllog.FromContext(ctx)
-	relayPeers := []peer.AddrInfo{}
-	relayStatic := []string{}
-	for _, relayName := range m.Status.CircuitRelays {
-		relay := clusterv1alpha1.CircuitRelay{}
-		relay.Name = relayName
-		relay.Namespace = m.Namespace
-		err := r.Get(ctx, client.ObjectKeyFromObject(&relay), &relay)
-		if err != nil {
-			log.Error(err, "could not lookup circuitRelay during confgMapScripts", "relay", relayName)
-			return nil, ""
-		}
-		if err = relay.Status.AddrInfo.Parse(); err != nil {
-			log.Error(err, "could not parse AddrInfo. Information will not be included in config", "relay", relayName)
-			continue
-		}
-		ai := relay.Status.AddrInfo.AddrInfo()
-		relayPeers = append(relayPeers, *ai)
-		p2ppart, err := ma.NewMultiaddr("/p2p/" + ai.ID.String())
-		if err != nil {
-			log.Error(err, "could not create p2p component during configMapScripts", "relay", relayName)
-		}
-		for _, addr := range ai.Addrs {
-			fullMa := addr.Encapsulate(p2ppart).String()
-			relayStatic = append(relayStatic, fullMa)
-		}
-	}
 
-	cmName := "ipfs-cluster-scripts-" + m.Name
-	var storageMaxGB string
-	parsed, err := resource.ParseQuantity(m.Spec.IpfsStorage)
+	relayPeers, err := r.getCircuitInfo(ctx, m)
 	if err != nil {
-		storageMaxGB = "100"
-	} else {
-		sizei64, _ := parsed.AsInt64()
-		sizeGB := sizei64 / 1024 / 1024 / 1024
-		var reducedSize int64
-		// if the disk is big, use a bigger percentage of it.
-		if sizeGB > 1024*8 {
-			reducedSize = sizeGB * 9 / 10
-		} else {
-			reducedSize = sizeGB * 8 / 10
-		}
-		storageMaxGB = strconv.Itoa(int(reducedSize))
+		log.Error(err, "could not get relay circuit info")
+		return utils.ErrFunc(fmt.Errorf("error when getting relay circuit info: %w", err)), ""
+	}
+	relayStatic, err := staticAddrsFromRelayPeers(relayPeers)
+	if err != nil {
+		log.Error(err, "could not get static addresses from relayPeers")
+		return utils.ErrFunc(fmt.Errorf("could not get static addresses: %w", err)), ""
+	}
+	// convert multiaddrs to strings
+	relayStaticStrs := make([]string, len(relayStatic))
+	for i, maddr := range relayStatic {
+		relayStaticStrs[i] = maddr.String()
 	}
 
 	relayConfig := config.RelayClient{
 		Enabled:      config.True,
-		StaticRelays: relayStatic,
+		StaticRelays: relayStaticStrs,
 	}
 
-	// get the config script
-	configScript, err := scripts.CreateConfigureScript(
-		storageMaxGB,
-		relayPeers,
-		relayConfig,
-	)
+	cmName := "ipfs-cluster-scripts-" + m.Name
+
+	// configure storage variables
+	if err != nil {
+		return utils.ErrFunc(err), ""
+	}
+
+	// compute storage sizes of IPFS volumes
+	sizei64, ok := m.Spec.IpfsStorage.AsInt64()
+	if !ok {
+		sizei64 = m.Spec.IpfsStorage.ToDec().Value()
+	}
+	maxStorage := MaxIPFSStorage(sizei64)
+	maxStorageS := fmt.Sprintf("%dB", maxStorage)
+	bloomFilterSize := scripts.CalculateBloomFilterSize(maxStorage)
 	if err != nil {
 		return func() error {
 			return err
 		}, ""
 	}
+	// get the config script
+	configScript, err := scripts.CreateConfigureScript(
+		maxStorageS,
+		relayPeers,
+		relayConfig,
+		bloomFilterSize,
+	)
 
 	expected := &corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
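
The storage cap in the hunk above replaces the old string-based GB arithmetic: the Quantity from the spec is converted to bytes, reduced to a percentage of the disk by MaxIPFSStorage (added further down in this diff), and formatted as a byte string for the configure script. A hypothetical test-style walkthrough of that chain — the package placement, test name, and expected value are assumptions for illustration, not part of the commit:

```go
package controllers_test

import (
	"fmt"
	"testing"

	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/redhat-et/ipfs-operator/controllers"
)

// Follows the same conversion the reconciler performs:
// Quantity -> bytes -> capped bytes -> "<n>B" string.
func TestMaxIPFSStorageWalkthrough(t *testing.T) {
	q := resource.MustParse("100Gi")
	size, ok := q.AsInt64()
	if !ok {
		size = q.ToDec().Value() // fallback for quantities that don't fit an int64 mantissa
	}
	capped := controllers.MaxIPFSStorage(size) // 100Gi is under 8Ti, so 80% is used
	if got, want := fmt.Sprintf("%dB", capped), "85899345920B"; got != want {
		t.Fatalf("got %s, want %s", got, want)
	}
}
```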
@@ -118,3 +108,60 @@ func (r *IpfsReconciler) ConfigMapScripts(
 		return nil
 	}, cmName
 }
+
+// staticAddrsFromRelayPeers Extracts all of the static addresses from the
+// given list of relayPeers.
+func staticAddrsFromRelayPeers(relayPeers []peer.AddrInfo) ([]ma.Multiaddr, error) {
+	relayStatic := make([]ma.Multiaddr, 0)
+	for _, addrInfo := range relayPeers {
+		p2ppart, err := ma.NewMultiaddr("/p2p/" + addrInfo.ID.String())
+		if err != nil {
+			return nil, fmt.Errorf("could not create p2p component: %w", err)
+		}
+		for _, addr := range addrInfo.Addrs {
+			fullMa := addr.Encapsulate(p2ppart)
+			relayStatic = append(relayStatic, fullMa)
+		}
+	}
+	return relayStatic, nil
+}
+
+// getCircuitInfo Gets address info from the list of CircuitRelays
+// and returns a list of AddrInfo.
+func (r *IpfsReconciler) getCircuitInfo(
+	ctx context.Context,
+	ipfs *clusterv1alpha1.Ipfs,
+) ([]peer.AddrInfo, error) {
+	log := ctrllog.FromContext(ctx)
+	relayPeers := []peer.AddrInfo{}
+	for _, relayName := range ipfs.Status.CircuitRelays {
+		relay := clusterv1alpha1.CircuitRelay{}
+		relay.Name = relayName
+		relay.Namespace = ipfs.Namespace
+		// OPTIMIZE: do this asynchronously?
+		if err := r.Get(ctx, client.ObjectKeyFromObject(&relay), &relay); err != nil {
+			return nil, fmt.Errorf("could not lookup circuitRelay: %w", err)
+		}
+		if err := relay.Status.AddrInfo.Parse(); err != nil {
+			log.Error(err, "could not parse AddrInfo. Information will not be included in config", "relay", relayName)
+			continue
+		}
+		addrInfo := relay.Status.AddrInfo.AddrInfo()
+		relayPeers = append(relayPeers, *addrInfo)
+	}
+	return relayPeers, nil
+}
+
+// MaxIPFSStorage Accepts a storage quantity and returns a calculated
+// value to be used for setting the max IPFS storage value in bytes.
+func MaxIPFSStorage(ipfsStorage int64) (storageMaxGB int64) {
+	var reducedSize units.Base2Bytes
+	// if the disk is big, use a bigger percentage of it.
+	if units.Base2Bytes(ipfsStorage) > units.Tebibyte*8 {
+		reducedSize = units.Base2Bytes(ipfsStorage) * 9 / 10
+	} else {
+		reducedSize = units.Base2Bytes(ipfsStorage) * 8 / 10
+	}
+	return int64(reducedSize)
+}
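
The configure script now also receives a Bloom filter size from scripts.CalculateBloomFilterSize, whose implementation lives in one of the other changed files and is not shown in this excerpt. As a rough illustration only, the usual way to size a Bloom filter is m = -n·ln(p)/(ln 2)² bits for n expected entries at false-positive rate p; the block size and false-positive target below are assumptions, not the operator's actual parameters:

```go
package main

import (
	"fmt"
	"math"
)

// bloomFilterSizeBytes sizes a Bloom filter for n expected entries at
// false-positive rate p, using the textbook formula m = -n*ln(p)/(ln 2)^2 bits.
func bloomFilterSizeBytes(n int64, p float64) int64 {
	bits := -float64(n) * math.Log(p) / (math.Ln2 * math.Ln2)
	return int64(math.Ceil(bits / 8))
}

func main() {
	const blockSize = 256 * 1024        // assumed average IPFS block size (256 KiB)
	maxStorage := int64(85_899_345_920) // e.g. the capped size of a 100Gi volume
	expectedBlocks := maxStorage / blockSize
	fmt.Println(bloomFilterSizeBytes(expectedBlocks, 0.01)) // ≈ 392,600 bytes
}
```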
