Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 5 additions & 22 deletions token/services/identity/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import (
"github.com/hyperledger-labs/fabric-token-sdk/token/driver"
idriver "github.com/hyperledger-labs/fabric-token-sdk/token/services/identity/driver"
"github.com/hyperledger-labs/fabric-token-sdk/token/services/logging"
cache2 "github.com/hyperledger-labs/fabric-token-sdk/token/services/utils/cache"
"go.uber.org/zap/zapcore"
)

Expand Down Expand Up @@ -73,7 +74,6 @@ type Provider struct {

isMeCache cache[bool]
signers cache[*SignerEntry]
verifiers cache[*VerifierEntry]
}

// NewProvider creates a new identity provider implementing the driver.IdentityProvider interface.
Expand All @@ -91,9 +91,8 @@ func NewProvider(
enrollmentIDUnmarshaler: enrollmentIDUnmarshaler,
deserializer: deserializer,
storage: storage,
isMeCache: secondcache.NewTyped[bool](5000),
signers: secondcache.NewTyped[*SignerEntry](5000),
verifiers: secondcache.NewTyped[*VerifierEntry](5000),
isMeCache: cache2.NewNoCache[bool](),
signers: secondcache.NewTyped[*SignerEntry](5),
}
}

Expand All @@ -117,13 +116,6 @@ func (p *Provider) RegisterVerifier(ctx context.Context, identity driver.Identit
if v == nil {
return errors.New("invalid verifier, expected a valid instance")
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It looks like this function is no longer needed. I would investigate the callers of this function; we may be able to save further cycles.

idHash := identity.UniqueID()
entry := &VerifierEntry{Verifier: v}
if p.Logger.IsEnabledFor(zapcore.DebugLevel) {
entry.DebugStack = debug.Stack()
}
p.verifiers.Add(idHash, entry)
p.Logger.DebugfContext(ctx, "register verifier to [%s]:[%s]", idHash, logging.Identifier(v))
return nil
}

Expand Down Expand Up @@ -274,6 +266,7 @@ func (p *Provider) getSigner(ctx context.Context, identity driver.Identity, idHa
if err != nil {
return nil, errors.Wrapf(err, "failed deserializing identity for signer [%s]", identity)
}

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we agreed to cache only identities of type x509, didn't we?
If we still cache all signers and the cache is only of size 5, there will be even more contention, won't there?

entry = &SignerEntry{Signer: signer}
if p.Logger.IsEnabledFor(zapcore.DebugLevel) {
entry.DebugStack = debug.Stack()
Expand All @@ -282,6 +275,7 @@ func (p *Provider) getSigner(ctx context.Context, identity driver.Identity, idHa
if err := p.storage.StoreSignerInfo(ctx, identity, nil); err != nil {
return nil, errors.Wrap(err, "failed to store entry in storage for the passed signer")
}

return entry.Signer, nil
}

Expand Down Expand Up @@ -331,15 +325,4 @@ func (p *Provider) updateCaches(descriptor *idriver.IdentityDescriptor, alias dr
p.signers.Add(aliasID, entry)
}
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We are still caching every signer regardless of the identity type. This will generate even more contention on the signers cache, given its new size (5).

// verifiers
if descriptor.Verifier != nil {
entry := &VerifierEntry{Verifier: descriptor.Verifier}
if p.Logger.IsEnabledFor(zapcore.DebugLevel) {
entry.DebugStack = debug.Stack()
}
p.verifiers.Add(id, entry)
if setAlias {
p.verifiers.Add(aliasID, entry)
}
}
}
Loading