Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Identity #421

Open
wants to merge 13 commits into
base: main
Choose a base branch
from
Open
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
367 changes: 119 additions & 248 deletions pkg/authn/authn.pb.go

Large diffs are not rendered by default.

36 changes: 19 additions & 17 deletions pkg/identity/api/v1/identity_service_test.go
Original file line number Diff line number Diff line change
@@ -23,21 +23,21 @@ func (m *mockedMLSValidationService) GetAssociationState(ctx context.Context, ol

member_map := make([]*associations.MemberMap, 0)
member_map = append(member_map, &associations.MemberMap{
Key: &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_Address{Address: "key_address"}},
Key: &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_EthereumAddress{EthereumAddress: "key_address"}},
Value: &associations.Member{
Identifier: &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_Address{Address: "ident"}},
AddedByEntity: &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_Address{Address: "added_by_entity"}},
Identifier: &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_EthereumAddress{EthereumAddress: "ident"}},
AddedByEntity: &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_EthereumAddress{EthereumAddress: "added_by_entity"}},
},
})

new_members := make([]*associations.MemberIdentifier, 0)

new_members = append(new_members, &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_Address{Address: "0x01"}})
new_members = append(new_members, &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_Address{Address: "0x02"}})
new_members = append(new_members, &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_Address{Address: "0x03"}})
new_members = append(new_members, &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_EthereumAddress{EthereumAddress: "0x01"}})
new_members = append(new_members, &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_EthereumAddress{EthereumAddress: "0x02"}})
new_members = append(new_members, &associations.MemberIdentifier{Kind: &associations.MemberIdentifier_EthereumAddress{EthereumAddress: "0x03"}})

out := mlsvalidate.AssociationStateResult{
AssociationState: &associations.AssociationState{InboxId: "test_inbox", Members: member_map, RecoveryAddress: "recovery", SeenSignatures: [][]byte{[]byte("seen"), []byte("sig")}},
AssociationState: &associations.AssociationState{InboxId: "test_inbox", Members: member_map, RecoveryIdentifier: "recovery", RecoveryIdentifierKind: associations.IdentifierKind_IDENTIFIER_KIND_ETHEREUM, SeenSignatures: [][]byte{[]byte("seen"), []byte("sig")}},
StateDiff: &associations.AssociationStateDiff{NewMembers: new_members, RemovedMembers: nil},
}
return &out, nil
@@ -88,9 +88,10 @@ func makeCreateInbox(address string) *associations.IdentityAction {
return &associations.IdentityAction{
Kind: &associations.IdentityAction_CreateInbox{
CreateInbox: &associations.CreateInbox{
InitialAddress: address,
Nonce: 0,
InitialAddressSignature: &associations.Signature{},
InitialIdentifier: address,
InitialIdentifierKind: associations.IdentifierKind_IDENTIFIER_KIND_ETHEREUM,
Nonce: 0,
InitialIdentifierSignature: &associations.Signature{},
},
},
}
@@ -110,8 +111,8 @@ func makeRevokeAssociation() *associations.IdentityAction {
return &associations.IdentityAction{
Kind: &associations.IdentityAction_Revoke{
Revoke: &associations.RevokeAssociation{
MemberToRevoke: &associations.MemberIdentifier{},
RecoveryAddressSignature: &associations.Signature{},
MemberToRevoke: &associations.MemberIdentifier{},
RecoveryIdentifierSignature: &associations.Signature{},
},
},
}
@@ -120,8 +121,8 @@ func makeChangeRecoveryAddress() *associations.IdentityAction {
return &associations.IdentityAction{
Kind: &associations.IdentityAction_ChangeRecoveryAddress{
ChangeRecoveryAddress: &associations.ChangeRecoveryAddress{
NewRecoveryAddress: "",
ExistingRecoveryAddressSignature: &associations.Signature{},
NewRecoveryIdentifier: "",
ExistingRecoveryIdentifierSignature: &associations.Signature{},
},
},
}
@@ -171,7 +172,8 @@ func TestPublishedUpdatesCanBeRead(t *testing.T) {
require.Equal(t, res.Responses[0].InboxId, inbox_id)
require.Len(t, res.Responses[0].Updates, 1)
require.Len(t, res.Responses[0].Updates[0].Update.Actions, 1)
require.Equal(t, res.Responses[0].Updates[0].Update.Actions[0].GetCreateInbox().InitialAddress, address)
require.Equal(t, res.Responses[0].Updates[0].Update.Actions[0].GetCreateInbox().InitialIdentifier, address)
require.Equal(t, res.Responses[0].Updates[0].Update.Actions[0].GetCreateInbox().InitialIdentifierKind, associations.IdentifierKind_IDENTIFIER_KIND_ETHEREUM)
}

func TestPublishedUpdatesAreInOrder(t *testing.T) {
@@ -230,8 +232,8 @@ func TestQueryMultipleInboxes(t *testing.T) {
require.NoError(t, err)

require.Len(t, res.Responses, 2)
require.Equal(t, res.Responses[0].Updates[0].Update.Actions[0].GetCreateInbox().InitialAddress, first_address)
require.Equal(t, res.Responses[1].Updates[0].Update.Actions[0].GetCreateInbox().InitialAddress, second_address)
require.Equal(t, res.Responses[0].Updates[0].Update.Actions[0].GetCreateInbox().InitialIdentifier, first_address)
require.Equal(t, res.Responses[1].Updates[0].Update.Actions[0].GetCreateInbox().InitialIdentifier, second_address)
}

func TestInboxSizeLimit(t *testing.T) {
80 changes: 40 additions & 40 deletions pkg/migrations/mls/20240528181851_init-schema.up.sql
Original file line number Diff line number Diff line change
@@ -1,71 +1,71 @@
SET statement_timeout = 0;
SET
statement_timeout = 0;

--bun:split
CREATE TABLE installations(
id BYTEA PRIMARY KEY,
created_at BIGINT NOT NULL,
updated_at BIGINT NOT NULL,
inbox_id BYTEA NOT NULL,
key_package BYTEA NOT NULL,
expiration BIGINT NOT NULL
CREATE TABLE installations (
id BYTEA PRIMARY KEY,
created_at BIGINT NOT NULL,
updated_at BIGINT NOT NULL,
inbox_id BYTEA NOT NULL,
key_package BYTEA NOT NULL,
expiration BIGINT NOT NULL
);

--bun:split
CREATE TABLE group_messages(
id BIGSERIAL PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT NOW(),
group_id BYTEA NOT NULL,
data BYTEA NOT NULL,
group_id_data_hash BYTEA NOT NULL
CREATE TABLE group_messages (
id BIGSERIAL PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT NOW (),
group_id BYTEA NOT NULL,
data BYTEA NOT NULL,
group_id_data_hash BYTEA NOT NULL
);

--bun:split
CREATE INDEX idx_group_messages_group_id_id ON group_messages(group_id, id);
CREATE INDEX idx_group_messages_group_id_id ON group_messages (group_id, id);

--bun:split
CREATE UNIQUE INDEX idx_group_messages_group_id_data_hash ON group_messages(group_id_data_hash);
CREATE UNIQUE INDEX idx_group_messages_group_id_data_hash ON group_messages (group_id_data_hash);

--bun:split
CREATE TABLE welcome_messages(
id BIGSERIAL PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT NOW(),
installation_key BYTEA NOT NULL,
data BYTEA NOT NULL,
hpke_public_key BYTEA NOT NULL,
installation_key_data_hash BYTEA NOT NULL
CREATE TABLE welcome_messages (
id BIGSERIAL PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT NOW (),
installation_key BYTEA NOT NULL,
data BYTEA NOT NULL,
hpke_public_key BYTEA NOT NULL,
installation_key_data_hash BYTEA NOT NULL
);

--bun:split
CREATE INDEX idx_welcome_messages_installation_key_id ON welcome_messages(installation_key, id);
CREATE INDEX idx_welcome_messages_installation_key_id ON welcome_messages (installation_key, id);

--bun:split
CREATE UNIQUE INDEX idx_welcome_messages_group_key_data_hash ON welcome_messages(installation_key_data_hash);
CREATE UNIQUE INDEX idx_welcome_messages_group_key_data_hash ON welcome_messages (installation_key_data_hash);

--bun:split
CREATE TABLE inbox_log(
sequence_id BIGSERIAL PRIMARY KEY,
inbox_id BYTEA NOT NULL,
server_timestamp_ns BIGINT NOT NULL,
identity_update_proto BYTEA NOT NULL
CREATE TABLE inbox_log (
sequence_id BIGSERIAL PRIMARY KEY,
inbox_id BYTEA NOT NULL,
server_timestamp_ns BIGINT NOT NULL,
identity_update_proto BYTEA NOT NULL
);

--bun:split
CREATE INDEX idx_inbox_log_inbox_id_sequence_id ON inbox_log(inbox_id, sequence_id);
CREATE INDEX idx_inbox_log_inbox_id_sequence_id ON inbox_log (inbox_id, sequence_id);

--bun:split
CREATE TABLE address_log(
address TEXT NOT NULL,
inbox_id BYTEA NOT NULL,
association_sequence_id BIGINT,
revocation_sequence_id BIGINT
CREATE TABLE address_log (
address TEXT NOT NULL,
inbox_id BYTEA NOT NULL,
association_sequence_id BIGINT,
revocation_sequence_id BIGINT
);

--bun:split
CREATE INDEX idx_address_log_address_inbox_id ON address_log(address, inbox_id);
CREATE INDEX idx_address_log_address_inbox_id ON address_log (address, inbox_id);

--bun:split
CREATE TYPE inbox_filter AS (
inbox_id TEXT, -- Because this is serialized as JSON, we can't use a BYTEA type
sequence_id BIGINT
inbox_id TEXT, -- Because this is serialized as JSON, we can't use a BYTEA type
sequence_id BIGINT
);

10 changes: 10 additions & 0 deletions pkg/migrations/mls/20250310145526_upgrade-identity.down.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
SET
statement_timeout = 0;

DROP INDEX idx_address_log_identifier_inbox_id;

-- PostgreSQL does not allow RENAME COLUMN to be combined with other
-- actions in a single ALTER TABLE statement, so the drop and the rename
-- must be issued separately.
ALTER TABLE address_log
DROP COLUMN identifier_kind;

ALTER TABLE address_log
RENAME COLUMN identifier TO address;

-- Restore the original lookup index removed by the up migration.
CREATE INDEX idx_address_log_address_inbox_id ON address_log (address, inbox_id);
22 changes: 22 additions & 0 deletions pkg/migrations/mls/20250310145526_upgrade-identity.up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
SET
statement_timeout = 0;

DROP INDEX idx_address_log_address_inbox_id;

-- Add the column with DEFAULT 1 (Ethereum) so any row inserted while this
-- migration runs gets a value immediately; otherwise a concurrent insert
-- between the backfill UPDATE and SET NOT NULL could leave a NULL column
-- and make SET NOT NULL fail. On PG 11+ adding a column with a constant
-- default is a metadata-only change and does not rewrite the table.
ALTER TABLE address_log
ADD COLUMN identifier_kind INT DEFAULT 1;

ALTER TABLE address_log
RENAME COLUMN address TO identifier;

-- Backfill any pre-existing rows that somehow missed the default.
UPDATE address_log
SET
identifier_kind = 1
WHERE
identifier_kind IS NULL;

ALTER TABLE address_log
ALTER COLUMN identifier_kind
SET
NOT NULL;
Comment on lines +12 to +20
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't know the internals of PG well enough to remember whether alter table holds an exclusive table lock or not. If an insert happens between the UPDATE and the ALTER, it will have a null column. There has to be a way to do it in one statement...

Copy link
Contributor

@mkysel mkysel Mar 11, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would do it like this

ALTER TABLE address_log ADD COLUMN identifier_kind INT DEFAULT 1;
UPDATE address_log SET identifier_kind = 1 WHERE identifier_kind IS NULL;
ALTER TABLE address_log ALTER COLUMN identifier_kind SET NOT NULL;

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You're right, I did write this with the assumption that the migration holds an exclusive lock. I can just be safe and use your approach.


CREATE INDEX idx_address_log_identifier_inbox_id ON address_log (identifier, identifier_kind, inbox_id);
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@neekolas Do you think removing and adding a new index on this table will be an issue?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think it's a huge deal because the table is relatively small. But it will block the node from coming up until it's completed.

Would be better if we could add the index concurrently, but IIRC we get errors doing that because it happens in a transaction

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

CREATE INDEX CONCURRENTLY is what you might want. It takes twice as long, but it does not block concurrent transactions. I don't know whether the newest version of PG still releases the table lock and commits the ongoing transaction implicitly or not.

Since its a secondary non-unique index, you don't really care if it does not exist for a period of time. It will just make some queries slower.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

From my recollection we can't use it in our migrations because the migration runner wraps everything in a transaction, and transactions can't create indexes concurrently. But my knowledge of this codebase is very out of date and maybe the issue is resolved.

You can try it. If the issue is still there every test that touches the DB will fail.

26 changes: 15 additions & 11 deletions pkg/mls/store/queries.sql
Original file line number Diff line number Diff line change
@@ -34,27 +34,29 @@ FROM

-- name: GetAddressLogs :many
SELECT
a.address,
a.identifier,
a.identifier_kind,
encode(a.inbox_id, 'hex') AS inbox_id,
a.association_sequence_id
FROM
address_log a
INNER JOIN (
SELECT
address,
identifier,
identifier_kind,
MAX(association_sequence_id) AS max_association_sequence_id
FROM
address_log
WHERE
address = ANY (@addresses::TEXT[])
(identifier, identifier_kind) IN (SELECT unnest(@identifiers::TEXT[]), unnest(@identifier_kinds::INT[]))
AND revocation_sequence_id IS NULL
GROUP BY
address) b ON a.address = b.address
identifier, identifier_kind) b ON a.identifier = b.identifier
AND a.association_sequence_id = b.max_association_sequence_id;

-- name: InsertAddressLog :one
INSERT INTO address_log(address, inbox_id, association_sequence_id, revocation_sequence_id)
VALUES (@address, decode(@inbox_id, 'hex'), @association_sequence_id, @revocation_sequence_id)
INSERT INTO address_log(identifier, identifier_kind, inbox_id, association_sequence_id, revocation_sequence_id)
VALUES (@identifier, @identifier_kind, decode(@inbox_id, 'hex'), @association_sequence_id, @revocation_sequence_id)
RETURNING
*;

@@ -69,18 +71,21 @@ UPDATE
address_log
SET
revocation_sequence_id = @revocation_sequence_id
WHERE (address, inbox_id, association_sequence_id) =(
WHERE (identifier, identifier_kind, inbox_id, association_sequence_id) =(
SELECT
address,
identifier,
identifier_kind,
inbox_id,
MAX(association_sequence_id)
FROM
address_log AS a
WHERE
a.address = @address
a.identifier = @identifier
AND a.identifier_kind = @identifier_kind
AND a.inbox_id = decode(@inbox_id, 'hex')
GROUP BY
address,
identifier,
identifier_kind,
inbox_id);

-- name: CreateOrUpdateInstallation :exec
@@ -217,4 +222,3 @@ WHERE
ORDER BY
id DESC
LIMIT @numrows;

2 changes: 1 addition & 1 deletion pkg/mls/store/queries/db.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 3 additions & 2 deletions pkg/mls/store/queries/models.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading