Add Health Check Endpoint #101
base: main
Changes from all commits
419973a
b93cbd6
f1fad30
2132855
cb3438c
a550c56
c1afd86
765d597
bc3b5b4
816f69c
1f5f93a
@@ -0,0 +1,134 @@ (new file)

```go
package heimdall

import (
	"context"
	"encoding/json"
	"net/http"
	"sync"
	"time"

	"github.com/patterninc/heimdall/pkg/object/cluster"
	"github.com/patterninc/heimdall/pkg/object/status"
	"github.com/patterninc/heimdall/pkg/plugin"
)

const (
	healthCheckTimeout     = 30 * time.Second
	healthCheckConcurrency = 10
	healthStatusOK         = `ok`
	healthStatusError      = `error`
	healthStatusUnchecked  = `unchecked`
)

// clusterProbe pairs a cluster with the command handler used to probe it.
type clusterProbe struct {
	cluster    *cluster.Cluster
	handler    plugin.Handler
	pluginName string
}

type healthCheckResult struct {
	ClusterID   string `json:"cluster_id"`
	ClusterName string `json:"cluster_name"`
	Plugin      string `json:"plugin"`
	Status      string `json:"status"`
	LatencyMs   int64  `json:"latency_ms"`
	Error       string `json:"error,omitempty"`
}

type healthChecksResponse struct {
	Healthy bool                `json:"healthy"`
	Checks  []healthCheckResult `json:"checks"`
}

// healthHandler runs health checks against all eligible clusters and reports
// aggregate health: 200 when no check errored, 503 otherwise.
func (h *Heimdall) healthHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(r.Context(), healthCheckTimeout)
	defer cancel()

	probes := h.resolveClusterProbes()
	results := h.runHealthChecks(ctx, probes)

	healthy := true
	for _, res := range results {
		if res.Status == healthStatusError {
			healthy = false
			break
		}
	}

	resp := healthChecksResponse{Healthy: healthy, Checks: results}
	data, _ := json.Marshal(resp)

	w.Header().Set(contentTypeKey, contentTypeJSON)
	if healthy {
		w.WriteHeader(http.StatusOK)
	} else {
		w.WriteHeader(http.StatusServiceUnavailable)
	}
	w.Write(data)
}

// resolveClusterProbes pairs every active, health-check-enabled cluster with
// the first active command whose cluster tags match it.
func (h *Heimdall) resolveClusterProbes() []*clusterProbe {
	var probes []*clusterProbe
	for _, cl := range h.Clusters {
		if cl.Status != status.Active || !cl.HealthCheck {
			continue
		}
		for _, cmd := range h.Commands {
			if cmd.Status != status.Active {
				continue
			}
			if cl.Tags.Contains(cmd.ClusterTags) {
				probes = append(probes, &clusterProbe{
					cluster:    cl,
					handler:    h.commandHandlers[cmd.ID],
					pluginName: cmd.Plugin,
				})
				break
			}
		}
	}
	return probes
}

// runHealthChecks probes clusters concurrently, bounded by a semaphore of
// healthCheckConcurrency slots; each goroutine writes only its own slot in results.
func (h *Heimdall) runHealthChecks(ctx context.Context, probes []*clusterProbe) []healthCheckResult {
	results := make([]healthCheckResult, len(probes))
	sem := make(chan struct{}, healthCheckConcurrency)
	var wg sync.WaitGroup
	for i, probe := range probes {
		wg.Add(1)
		go func(i int, probe *clusterProbe) {
			defer wg.Done()
			sem <- struct{}{}
			defer func() { <-sem }()
			results[i] = h.checkCluster(ctx, probe)
		}(i, probe)
	}
	wg.Wait()
	return results
}

// checkCluster runs a single probe; handlers that do not implement
// plugin.HealthChecker are reported as "unchecked" rather than failing.
func (h *Heimdall) checkCluster(ctx context.Context, probe *clusterProbe) healthCheckResult {
	start := time.Now()
	res := healthCheckResult{
		ClusterID:   probe.cluster.ID,
		ClusterName: probe.cluster.Name,
		Plugin:      probe.pluginName,
	}

	hc, ok := probe.handler.(plugin.HealthChecker)
	if !ok {
		res.Status = healthStatusUnchecked
		res.LatencyMs = time.Since(start).Milliseconds()
		return res
	}

	err := hc.HealthCheck(ctx, probe.cluster)
	res.LatencyMs = time.Since(start).Milliseconds()
	if err != nil {
		res.Status = healthStatusError
		res.Error = err.Error()
	} else {
		res.Status = healthStatusOK
	}
	return res
}
```
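For reference, a response from this endpoint would look roughly like the following (the route registration is not part of this diff, so the mount path is unknown). Field names come from the struct tags above; the cluster IDs, names, plugins, and latencies are illustrative:

```json
{
  "healthy": true,
  "checks": [
    {
      "cluster_id": "c-1a2b",
      "cluster_name": "orders-dynamodb",
      "plugin": "dynamodb",
      "status": "ok",
      "latency_ms": 42
    },
    {
      "cluster_id": "c-3c4d",
      "cluster_name": "reporting",
      "plugin": "snowflake",
      "status": "unchecked",
      "latency_ms": 0
    }
  ]
}
```

Note that an `unchecked` cluster does not degrade the endpoint: only a check whose status is `error` flips `healthy` to `false` and the HTTP status to 503.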
@@ -143,6 +143,45 @@ func (d *commandContext) Execute(ctx context.Context, r *plugin.Runtime, j *job.

```go
// HealthCheck implements the plugin.HealthChecker interface
func (d *commandContext) HealthCheck(ctx context.Context, c *cluster.Cluster) error {
	clusterCtx := &clusterContext{}
	if c.Context != nil {
		if err := c.Context.Unmarshal(clusterCtx); err != nil {
			return err
		}
	}

	awsConfig, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}

	// Assume the cluster's role when one is configured; otherwise fall back
	// to the default credential chain.
	assumeRoleOptions := func(_ *dynamodb.Options) {}
	if clusterCtx.RoleARN != nil {
		stsSvc := sts.NewFromConfig(awsConfig)
		out, err := stsSvc.AssumeRole(ctx, &sts.AssumeRoleInput{
			RoleArn:         clusterCtx.RoleARN,
			RoleSessionName: assumeRoleSession,
		})
		if err != nil {
			return err
		}
		assumeRoleOptions = func(o *dynamodb.Options) {
			o.Credentials = credentials.NewStaticCredentialsProvider(
				*out.Credentials.AccessKeyId,
				*out.Credentials.SecretAccessKey,
				*out.Credentials.SessionToken,
			)
		}
	}

	// Cheapest liveness probe available: list at most one table.
	svc := dynamodb.NewFromConfig(awsConfig, assumeRoleOptions)
	maxResults := int32(1)
	_, err = svc.ListTables(ctx, &dynamodb.ListTablesInput{Limit: &maxResults})
	return err
}

func (d *commandContext) Cleanup(ctx context.Context, jobID string, c *cluster.Cluster) error {
	// TODO: Implement cleanup if needed
	return nil
}
```

Comment on lines +171 to +175 (the `credentials.NewStaticCredentialsProvider` call above):

Hardcoded AWS Credentials in Go Application

Embedding static AWS credentials directly into a Go application using `credentials.NewStaticCredentialsProvider` makes them recoverable by anyone with access to the source or the compiled binary.

Remediation

Hardcoding AWS credentials into an application poses a significant security risk. If the application's code is compromised or accidentally exposed, the hardcoded credentials can be easily extracted and misused by attackers to gain unauthorized access to AWS resources, potentially leading to data breaches, financial losses, and other severe consequences.

To fix this issue securely, applications should retrieve AWS credentials from secure sources at runtime, such as environment variables, AWS credential files, or AWS credential providers. This approach ensures that credentials are not embedded in the application's code and can be easily rotated or revoked if needed.

Code examples:

```go
// VULNERABLE CODE - hardcoded AWS credentials
// (the scanner's original snippet was truncated; the values below are the
// standard AWS documentation placeholders)
creds := credentials.NewStaticCredentialsProvider(
	"AKIAIOSFODNN7EXAMPLE",                     // access key ID committed to source
	"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", // secret access key committed to source
	"",                                         // session token
)
```

```go
// SECURE CODE - using the default AWS credential provider chain
cfg, err := config.LoadDefaultConfig(context.TODO())
// AWS credentials are retrieved securely from the environment or other sources
```
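The `plugin.HealthChecker` interface itself does not appear in this diff. From its two call sites (the type assertion in `checkCluster` and the DynamoDB `HealthCheck` method above), it presumably has the minimal shape sketched below; treat this as a reconstruction, not the actual source:

```go
package plugin

import (
	"context"

	"github.com/patterninc/heimdall/pkg/object/cluster"
)

// HealthChecker is the optional interface a command handler implements to
// participate in the health endpoint's per-cluster probes.
// (Presumed shape, reconstructed from its call sites in this PR.)
type HealthChecker interface {
	HealthCheck(ctx context.Context, c *cluster.Cluster) error
}
```

Handlers that do not implement it are reported with status `unchecked` instead of failing the endpoint.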