
Commit 1a76bfb

Author: Ryan Lymburner (committed)

Address PR comments.

1 parent 7f08fd9 · commit 1a76bfb

7 files changed: +31 −35 lines changed

docs/api-types/target-group-policy.md
Lines changed: 1 addition & 1 deletion

@@ -94,7 +94,7 @@ spec:
     path: "/health"
     port: 8080
     protocol: HTTP
-    protocolVersion: HTTP1
+    protocolVersion: HTTP2
     statusMatch: "200-299"
 ```
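
For context, the corrected `healthCheck` block in this docs example corresponds to the VPC Lattice health check structure that the manager code later in this commit passes to `UpdateTargetGroupWithContext`. Below is a minimal sketch, assuming the aws-sdk-go `vpclattice` types; the field values are copied from the docs snippet, and `buildDocExampleHealthCheck` is a hypothetical helper, not code from this commit.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/vpclattice"
)

// buildDocExampleHealthCheck mirrors the documented healthCheck block:
// an HTTP health check on /health:8080 using protocol version HTTP2 and
// a 200-299 status match.
func buildDocExampleHealthCheck() *vpclattice.HealthCheckConfig {
	return &vpclattice.HealthCheckConfig{
		Path:            aws.String("/health"),
		Port:            aws.Int64(8080),
		Protocol:        aws.String("HTTP"),
		ProtocolVersion: aws.String("HTTP2"),
		Matcher:         &vpclattice.Matcher{HttpCode: aws.String("200-299")},
	}
}

func main() {
	fmt.Println(buildDocExampleHealthCheck())
}
```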

docs/guides/advanced-configurations.md
Lines changed: 0 additions & 4 deletions

@@ -48,10 +48,6 @@ The `{index}` in the annotation corresponds to the zero-based index of the rule
 
 Higher priority values indicate higher precedence, so requests to `/api/v2` will be matched by the first rule (priority 200) before the second rule (priority 100) is considered.
 
-### Multi-Cluster Health Check Configuration
-
-In multi-cluster deployments, you can ensure consistent health check configuration across all clusters by applying TargetGroupPolicy to ServiceExport resources. This eliminates the previous limitation where only the cluster containing the route resource would receive the correct health check configuration.
-
 #### Configuring Health Checks for ServiceExport
 
 When you apply a TargetGroupPolicy to a ServiceExport, the health check configuration is automatically propagated to all target groups across all clusters that participate in the service mesh:
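
To make the propagation described in this guide concrete: the health check resolver touched later in this commit is what picks up a TargetGroupPolicy attached to a ServiceExport and turns it into a Lattice health check config. The sketch below assumes it lives in the same `pkg/deploy/lattice` package as the resolver and uses only the `NewHealthCheckConfigResolver` and `ResolveHealthCheckConfig` signatures shown in this commit; the helper name `resolveForServiceExport` and its parameters are illustrative.

```go
// resolveForServiceExport is a hypothetical helper showing how a caller in
// this package would resolve policy-driven health checks for a target group
// that was created for a ServiceExport.
func resolveForServiceExport(
	ctx context.Context,
	log gwlog.Logger,
	k8sClient client.Client,
	tg *model.TargetGroup, // target group backed by a ServiceExport
) (*vpclattice.HealthCheckConfig, error) {
	resolver := NewHealthCheckConfigResolver(log, k8sClient)

	// If a TargetGroupPolicy targets the exported Service, its health check
	// settings are returned here and applied to the corresponding Lattice
	// target group in every cluster that exports the Service.
	return resolver.ResolveHealthCheckConfig(ctx, tg)
}
```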

pkg/deploy/lattice/health_check_resolver.go
Lines changed: 1 addition & 1 deletion

@@ -41,7 +41,7 @@ func NewHealthCheckConfigResolver(log gwlog.Logger, client client.Client) *Healt
 // 5. Preserves existing behavior when no policies are applicable
 func (r *HealthCheckConfigResolver) ResolveHealthCheckConfig(ctx context.Context, targetGroup *model.TargetGroup) (*vpclattice.HealthCheckConfig, error) {
     if r.client == nil {
-        r.log.Debugf(ctx, "No client available for policy resolution, skipping health check config resolution")
+        r.log.Debugf(ctx, "No k8sClient available for policy resolution, skipping health check config resolution")
         return nil, nil
     }
 
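
The only change in this hunk is the log wording, but the guard it sits in is what makes the resolver safe to use without a Kubernetes client: it returns `(nil, nil)` rather than an error. A short sketch of the calling pattern, assuming the same package as the resolver; the surrounding variable names (`log`, `ctx`, `targetGroup`) are illustrative.

```go
// With a nil client the resolver skips policy lookup entirely.
resolver := NewHealthCheckConfigResolver(log, nil)

hc, err := resolver.ResolveHealthCheckConfig(ctx, targetGroup)
if err == nil && hc == nil {
	// No policy-derived config: callers such as the target group manager
	// fall back to fillDefaultHealthCheckConfig, preserving prior behavior.
}
```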

pkg/deploy/lattice/health_check_resolver_test.go
Lines changed: 4 additions & 4 deletions

@@ -78,7 +78,7 @@ func TestHealthCheckConfigResolver_ResolveHealthCheckConfig(t *testing.T) {
     _ = anv1alpha1.Install(scheme)
     _ = corev1.AddToScheme(scheme)
 
-    // Create fake client with no objects
+    // Create fake k8sClient with no objects
     k8sClient := fake.NewClientBuilder().
         WithScheme(scheme).
         Build()
@@ -438,14 +438,14 @@ func Test_ResolveHealthCheckConfig_WithPolicies(t *testing.T) {
     _ = corev1.AddToScheme(scheme)
     _ = gwv1alpha2.Install(scheme)
 
-    // Convert policies to client.Object slice
+    // Convert policies to k8sClient.Object slice
     objects := make([]client.Object, len(tt.policies))
     for i, policy := range tt.policies {
         policyCopy := policy
         objects[i] = &policyCopy
     }
 
-    // Create fake client with policies
+    // Create fake k8sClient with policies
     k8sClient := fake.NewClientBuilder().
         WithScheme(scheme).
         WithObjects(objects...).
@@ -492,7 +492,7 @@ func Test_ResolveHealthCheckConfig_ErrorHandling(t *testing.T) {
     t.Run(tt.name, func(t *testing.T) {
         ctx := context.Background()
 
-        // Create a mock controller and client that returns an error
+        // Create a mock controller and k8sClient that returns an error
         c := gomock.NewController(t)
         defer c.Finish()
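
All three test hunks lean on the same setup pattern: register the schemes the resolver reads, then build a fake controller-runtime client seeded with the test's TargetGroupPolicy objects. Condensed below as a sketch under the assumption that `runtime` is `k8s.io/apimachinery/pkg/runtime` and that `policies` is a hypothetical slice standing in for `tt.policies`.

```go
// Register the APIs the resolver queries, then seed a fake client.
scheme := runtime.NewScheme()
_ = anv1alpha1.Install(scheme)
_ = corev1.AddToScheme(scheme)
_ = gwv1alpha2.Install(scheme)

objects := make([]client.Object, len(policies))
for i := range policies {
	policyCopy := policies[i]
	objects[i] = &policyCopy
}

k8sClient := fake.NewClientBuilder().
	WithScheme(scheme).
	WithObjects(objects...).
	Build()
```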

pkg/deploy/lattice/target_group_manager.go
Lines changed: 16 additions & 16 deletions

@@ -32,16 +32,16 @@ type TargetGroupManager interface {
 }
 
 type defaultTargetGroupManager struct {
-    log    gwlog.Logger
-    cloud  pkg_aws.Cloud
-    client client.Client
+    log       gwlog.Logger
+    awsCloud  pkg_aws.Cloud
+    k8sClient client.Client
 }
 
-func NewTargetGroupManager(log gwlog.Logger, cloud pkg_aws.Cloud, client client.Client) *defaultTargetGroupManager {
+func NewTargetGroupManager(log gwlog.Logger, awsCloud pkg_aws.Cloud, k8sClient client.Client) *defaultTargetGroupManager {
     return &defaultTargetGroupManager{
-        log:    log,
-        cloud:  cloud,
-        client: client,
+        log:       log,
+        awsCloud:  awsCloud,
+        k8sClient: k8sClient,
     }
 }
 
@@ -89,7 +89,7 @@ func (s *defaultTargetGroupManager) create(ctx context.Context, modelTg *model.T
         Config: latticeTgCfg,
         Name:   &latticeTgName,
         Type:   &latticeTgType,
-        Tags:   s.cloud.DefaultTags(),
+        Tags:   s.awsCloud.DefaultTags(),
     }
     createInput.Tags[model.K8SClusterNameKey] = &modelTg.Spec.K8SClusterName
     createInput.Tags[model.K8SServiceNameKey] = &modelTg.Spec.K8SServiceName
@@ -102,7 +102,7 @@ func (s *defaultTargetGroupManager) create(ctx context.Context, modelTg *model.T
         createInput.Tags[model.K8SRouteNamespaceKey] = &modelTg.Spec.K8SRouteNamespace
     }
 
-    lattice := s.cloud.Lattice()
+    lattice := s.awsCloud.Lattice()
     resp, err := lattice.CreateTargetGroupWithContext(ctx, &createInput)
     if err != nil {
         return model.TargetGroupStatus{},
@@ -135,7 +135,7 @@ func (s *defaultTargetGroupManager) update(ctx context.Context, targetGroup *mod
     }
 
     // Try to resolve health check configuration from TargetGroupPolicy using centralized resolver
-    resolver := NewHealthCheckConfigResolver(s.log, s.client)
+    resolver := NewHealthCheckConfigResolver(s.log, s.k8sClient)
     policyHealthCheckConfig, err := resolver.ResolveHealthCheckConfig(ctx, targetGroup)
     if err != nil {
         s.log.Debugf(ctx, "Failed to resolve health check config from policy: %v", err)
@@ -148,7 +148,7 @@ func (s *defaultTargetGroupManager) update(ctx context.Context, targetGroup *mod
     s.fillDefaultHealthCheckConfig(healthCheckConfig, targetGroup.Spec.Protocol, targetGroup.Spec.ProtocolVersion)
 
     if !reflect.DeepEqual(healthCheckConfig, latticeTg.Config.HealthCheck) {
-        _, err := s.cloud.Lattice().UpdateTargetGroupWithContext(ctx, &vpclattice.UpdateTargetGroupInput{
+        _, err := s.awsCloud.Lattice().UpdateTargetGroupWithContext(ctx, &vpclattice.UpdateTargetGroupInput{
            HealthCheck:           healthCheckConfig,
            TargetGroupIdentifier: latticeTg.Id,
        })
@@ -188,7 +188,7 @@ func (s *defaultTargetGroupManager) Delete(ctx context.Context, modelTg *model.T
     }
     s.log.Debugf(ctx, "Deleting target group %s", modelTg.Status.Id)
 
-    lattice := s.cloud.Lattice()
+    lattice := s.awsCloud.Lattice()
 
     // de-register all targets first
     listTargetsInput := vpclattice.ListTargetsInput{
@@ -269,7 +269,7 @@ type tgListOutput struct {
 
 // Retrieve all TGs in the account, including tags. If individual tags fetch fails, tags will be nil for that tg
 func (s *defaultTargetGroupManager) List(ctx context.Context) ([]tgListOutput, error) {
-    lattice := s.cloud.Lattice()
+    lattice := s.awsCloud.Lattice()
     var tgList []tgListOutput
     targetGroupListInput := vpclattice.ListTargetGroupsInput{}
     resp, err := lattice.ListTargetGroupsAsList(ctx, &targetGroupListInput)
@@ -282,7 +282,7 @@ func (s *defaultTargetGroupManager) List(ctx context.Context) ([]tgListOutput, e
     tgArns := utils.SliceMap(resp, func(tg *vpclattice.TargetGroupSummary) string {
         return aws.StringValue(tg.Arn)
     })
-    tgArnToTagsMap, err := s.cloud.Tagging().GetTagsForArns(ctx, tgArns)
+    tgArnToTagsMap, err := s.awsCloud.Tagging().GetTagsForArns(ctx, tgArns)
 
     if err != nil {
         return nil, err
@@ -300,7 +300,7 @@ func (s *defaultTargetGroupManager) findTargetGroup(
     ctx context.Context,
     modelTargetGroup *model.TargetGroup,
 ) (*vpclattice.GetTargetGroupOutput, error) {
-    arns, err := s.cloud.Tagging().FindResourcesByTags(ctx, services.ResourceTypeTargetGroup,
+    arns, err := s.awsCloud.Tagging().FindResourcesByTags(ctx, services.ResourceTypeTargetGroup,
         model.TagsFromTGTagFields(modelTargetGroup.Spec.TargetGroupTagFields))
     if err != nil {
         return nil, err
@@ -310,7 +310,7 @@ func (s *defaultTargetGroupManager) findTargetGroup(
     }
 
     for _, arn := range arns {
-        latticeTg, err := s.cloud.Lattice().GetTargetGroupWithContext(ctx, &vpclattice.GetTargetGroupInput{
+        latticeTg, err := s.awsCloud.Lattice().GetTargetGroupWithContext(ctx, &vpclattice.GetTargetGroupInput{
            TargetGroupIdentifier: &arn,
        })
        if err != nil {
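
The renames above are wiring only; behavior is unchanged. For callers, construction now reads as in the sketch below: `log`, `awsCloud`, and `k8sClient` are illustrative variables of the types in the new signature, and the nil-client form is the backwards-compatible path exercised by the tests that follow.

```go
// Normal construction with the renamed parameters.
tgManager := NewTargetGroupManager(log, awsCloud, k8sClient)

// Still valid: with a nil client the health check resolver returns no
// policy config and the manager keeps its default health check behavior.
legacyTgManager := NewTargetGroupManager(gwlog.FallbackLogger, awsCloud, nil)

_, _ = tgManager, legacyTgManager
```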

pkg/deploy/lattice/target_group_manager_test.go
Lines changed: 7 additions & 7 deletions

@@ -1215,8 +1215,8 @@ func Test_update_ServiceExportWithPolicy_Integration(t *testing.T) {
     mockTagging := mocks.NewMockTagging(c)
     cloud := pkg_aws.NewDefaultCloudWithTagging(mockLattice, mockTagging, TestCloudConfig)
 
-    // Since we don't have a real k8s client in this test, we'll test the case where
-    // no client is available (which should fall back to default behavior)
+    // Since we don't have a real k8s k8sClient in this test, we'll test the case where
+    // no k8sClient is available (which should fall back to default behavior)
     tgManager := NewTargetGroupManager(gwlog.FallbackLogger, cloud, nil)
 
     // The update should not call UpdateTargetGroup since the health check config
@@ -1411,7 +1411,7 @@ func Test_update_ServiceExportWithPolicyResolution(t *testing.T) {
     _ = corev1.AddToScheme(scheme)
     _ = gwv1alpha2.Install(scheme)
 
-    // Create fake client with policy if provided
+    // Create fake k8sClient with policy if provided
     clientBuilder := fake.NewClientBuilder().WithScheme(scheme)
     if tt.policy != nil {
         clientBuilder = clientBuilder.WithObjects(tt.policy)
@@ -1451,18 +1451,18 @@
 }
 
 // Test_update_BackwardsCompatibility tests that existing behavior is preserved
-// when no policies are present or when the client is nil
+// when no policies are present or when the k8sClient is nil
 func Test_update_BackwardsCompatibility(t *testing.T) {
     tests := []struct {
         name                string
-        client              bool // whether to provide a k8s client
+        client              bool // whether to provide a k8s k8sClient
         targetGroup         *model.TargetGroup
         existingHealthCheck *vpclattice.HealthCheckConfig
         expectedHealthCheck *vpclattice.HealthCheckConfig
         expectUpdate        bool
     }{
         {
-            name:   "No client provided - should use default behavior",
+            name:   "No k8sClient provided - should use default behavior",
             client: false,
             targetGroup: &model.TargetGroup{
                 Spec: model.TargetGroupSpec{
@@ -1538,7 +1538,7 @@ func Test_update_BackwardsCompatibility(t *testing.T) {
             expectUpdate: false, // Same config, no update needed
         },
         {
-            name:   "HTTPRoute target group with client - should skip policy resolution",
+            name:   "HTTPRoute target group with k8sClient - should skip policy resolution",
             client: true,
             targetGroup: &model.TargetGroup{
                 Spec: model.TargetGroupSpec{

pkg/deploy/lattice/target_group_synthesizer_test.go
Lines changed: 2 additions & 2 deletions

@@ -515,11 +515,11 @@ func Test_SynthesizeCreate_WithServiceExportTargetGroup(t *testing.T) {
     mockTGManager.EXPECT().Upsert(ctx, gomock.Any()).Return(
         model.TargetGroupStatus{Name: "test-tg", Arn: "arn:test", Id: "tg-123"}, nil)
 
-    // Create the synthesizer with nil client to test graceful handling when no client is available
+    // Create the synthesizer with nil k8sClient to test graceful handling when no k8sClient is available
     synthesizer := NewTargetGroupSynthesizer(
         gwlog.FallbackLogger, nil, nil, mockTGManager, nil, nil, stack)
 
-    // Execute SynthesizeCreate - should not fail even when client is nil
+    // Execute SynthesizeCreate - should not fail even when k8sClient is nil
     err = synthesizer.SynthesizeCreate(ctx)
     assert.NoError(t, err)
 }
