diff --git a/e2e/tests/rate_limiting/rate_limiting_test.go b/e2e/tests/rate_limiting/rate_limiting_test.go new file mode 100644 index 00000000000..38cc6c23bd8 --- /dev/null +++ b/e2e/tests/rate_limiting/rate_limiting_test.go @@ -0,0 +1,259 @@ +//go:build !test_e2e + +package ratelimiting + +import ( + "context" + "testing" + + interchaintest "github.com/cosmos/interchaintest/v10" + ibc "github.com/cosmos/interchaintest/v10/ibc" + testutil "github.com/cosmos/interchaintest/v10/testutil" + testifysuite "github.com/stretchr/testify/suite" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + "github.com/cosmos/ibc-go/e2e/testsuite" + "github.com/cosmos/ibc-go/e2e/testsuite/query" + "github.com/cosmos/ibc-go/e2e/testvalues" + ratelimitingtypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +type RateLimTestSuite struct { + testsuite.E2ETestSuite +} + +func TestRateLimitSuite(t *testing.T) { + testifysuite.Run(t, new(RateLimTestSuite)) +} + +func (s *RateLimTestSuite) SetupSuite() { + s.SetupChains(context.TODO(), 2, nil, func(options *testsuite.ChainOptions) { + options.RelayerCount = 1 + }) +} + +func (s *RateLimTestSuite) TestRateLimit() { + t := s.T() + ctx := context.TODO() + testName := t.Name() + + chainA, chainB := s.GetChains() + + userA := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) + userB := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) + + authority, err := query.ModuleAccountAddress(ctx, govtypes.ModuleName, chainA) + s.Require().NoError(err) + s.Require().NotNil(authority) + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + s.StartRelayer(relayer, testName) + + chanAB := s.GetChannelBetweenChains(testName, chainA, chainB) + + 
escrowAddrA := transfertypes.GetEscrowAddress(chanAB.PortID, chanAB.ChannelID) + denomA := chainA.Config().Denom + + ibcTokenB := testsuite.GetIBCToken(denomA, chanAB.PortID, chanAB.ChannelID) + + t.Run("No rate limit set: transfer succeeds", func(_ *testing.T) { + userABalBefore, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + userBBalBefore, err := query.Balance(ctx, chainB, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(userBBalBefore.Int64()) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + packet, err := ibctesting.ParseV1PacketFromEvents(txResp.Events) + s.Require().NoError(err) + s.Require().NotNil(packet) + + s.Require().NoError(testutil.WaitForBlocks(ctx, 5, chainA, chainB), "failed to wait for blocks") + s.AssertPacketRelayed(ctx, chainA, chanAB.PortID, chanAB.ChannelID, packet.Sequence) + + userABalAfter, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + + // Balanced moved form userA to userB + s.Require().Equal(userABalBefore-testvalues.IBCTransferAmount, userABalAfter) + escrowBalA, err := query.Balance(ctx, chainA, escrowAddrA.String(), denomA) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalA.Int64()) + + userBBalAfter, err := query.Balance(ctx, chainB, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, userBBalAfter.Int64()) + }) + + t.Run("Add outgoing rate limit on ChainA", func(_ *testing.T) { + resp, err := query.GRPCQuery[ratelimitingtypes.QueryAllRateLimitsResponse](ctx, chainA, &ratelimitingtypes.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().Nil(resp.RateLimits) + + sendPercentage := int64(10) + recvPercentage := int64(0) + 
s.addRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String(), sendPercentage, recvPercentage, 1) + + resp, err = query.GRPCQuery[ratelimitingtypes.QueryAllRateLimitsResponse](ctx, chainA, &ratelimitingtypes.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().Len(resp.RateLimits, 1) + + rateLimit := resp.RateLimits[0] + s.Require().Equal(rateLimit.Flow.Outflow.Int64(), int64(0)) + s.Require().Equal(rateLimit.Flow.Inflow.Int64(), int64(0)) + s.Require().Equal(rateLimit.Quota.MaxPercentSend.Int64(), sendPercentage) + s.Require().Equal(rateLimit.Quota.MaxPercentRecv.Int64(), recvPercentage) + s.Require().Equal(rateLimit.Quota.DurationHours, uint64(1)) + }) + + t.Run("Transfer updates the rate limit flow", func(_ *testing.T) { + userABalBefore, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + packet, err := ibctesting.ParseV1PacketFromEvents(txResp.Events) + s.Require().NoError(err) + s.Require().NotNil(packet) + + s.Require().NoError(testutil.WaitForBlocks(ctx, 5, chainA, chainB), "failed to wait for blocks") + s.AssertPacketRelayed(ctx, chainA, chanAB.PortID, chanAB.ChannelID, packet.Sequence) + + userABalAfter, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + + // Balanced moved form userA to userB + s.Require().Equal(userABalBefore-testvalues.IBCTransferAmount, userABalAfter) + userBBalAfter, err := query.Balance(ctx, chainB, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(2*testvalues.IBCTransferAmount, userBBalAfter.Int64()) + + // Check the flow has been updated. 
+ rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + s.Require().NotNil(rateLimit) + s.Require().Equal(rateLimit.Flow.Outflow.Int64(), testvalues.IBCTransferAmount) + }) + + t.Run("Fill and exceed quota", func(_ *testing.T) { + rateLim := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + sendPercentage := rateLim.Quota.MaxPercentSend.Int64() + + // Create an account that can almost exhause the outflow limit. + richKidAmt := rateLim.Flow.ChannelValue.MulRaw(sendPercentage).QuoRaw(100).Sub(rateLim.Flow.Outflow) + richKid := interchaintest.GetAndFundTestUsers(t, ctx, "richkid", richKidAmt, chainA)[0] + s.Require().NoError(testutil.WaitForBlocks(ctx, 4, chainA)) + + sendCoin := sdk.NewCoin(denomA, richKidAmt) + + // Fill the quota + txResp := s.Transfer(ctx, chainA, richKid, chanAB.PortID, chanAB.ChannelID, sendCoin, richKid.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + // Sending even 10denomA fails due to exceeding the quota + sendCoin = sdk.NewInt64Coin(denomA, 10) + txResp = s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, sendCoin, userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxFailure(txResp, ratelimitingtypes.ErrQuotaExceeded) + }) + + t.Run("Reset rate limit: transfer succeeds", func(_ *testing.T) { + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + sendPercentage := rateLimit.Quota.MaxPercentSend.Int64() + recvPercentage := rateLimit.Quota.MaxPercentRecv.Int64() + + s.resetRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String()) + + rateLimit = s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + // Resetting only clears the flow. 
It does not change the quota + s.Require().Zero(rateLimit.Flow.Outflow.Int64()) + s.Require().Equal(rateLimit.Quota.MaxPercentSend.Int64(), sendPercentage) + s.Require().Equal(rateLimit.Quota.MaxPercentRecv.Int64(), recvPercentage) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + }) + + t.Run("Set outflow quota to 0: transfer fails", func(_ *testing.T) { + sendPercentage := int64(0) + recvPercentage := int64(1) + s.updateRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String(), sendPercentage, recvPercentage) + + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + s.Require().Equal(rateLimit.Quota.MaxPercentSend.Int64(), sendPercentage) + s.Require().Equal(rateLimit.Quota.MaxPercentRecv.Int64(), recvPercentage) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxFailure(txResp, ratelimitingtypes.ErrQuotaExceeded) + }) + + t.Run("Remove rate limit -> transfer succeeds again", func(_ *testing.T) { + s.removeRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String()) + + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + s.Require().Nil(rateLimit) + + // Transfer works again + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + }) +} + +func (s *RateLimTestSuite) rateLimit(ctx context.Context, chain ibc.Chain, denom, chanID string) *ratelimitingtypes.RateLimit { + respRateLim, err := query.GRPCQuery[ratelimitingtypes.QueryRateLimitResponse](ctx, chain, 
&ratelimitingtypes.QueryRateLimitRequest{ + Denom: denom, + ChannelOrClientId: chanID, + }) + s.Require().NoError(err) + return respRateLim.RateLimit +} + +func (s *RateLimTestSuite) addRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string, sendPercent, recvPercent, duration int64) { + msg := &ratelimitingtypes.MsgAddRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + MaxPercentSend: sdkmath.NewInt(sendPercent), + MaxPercentRecv: sdkmath.NewInt(recvPercent), + DurationHours: uint64(duration), + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} + +func (s *RateLimTestSuite) resetRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string) { + msg := &ratelimitingtypes.MsgResetRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} + +func (s *RateLimTestSuite) updateRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string, sendPercent, recvPercent int64) { + msg := &ratelimitingtypes.MsgUpdateRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + MaxPercentSend: sdkmath.NewInt(sendPercent), + MaxPercentRecv: sdkmath.NewInt(recvPercent), + DurationHours: 1, + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} + +func (s *RateLimTestSuite) removeRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string) { + msg := &ratelimitingtypes.MsgRemoveRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} diff --git a/e2e/testsuite/codec.go b/e2e/testsuite/codec.go index 3c524b949da..01f553735cb 100644 --- a/e2e/testsuite/codec.go +++ b/e2e/testsuite/codec.go @@ -27,6 +27,7 @@ import ( icacontrollertypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/types" 
icahosttypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" packetforwardtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + ratelimitingtypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" v7migrations "github.com/cosmos/ibc-go/v10/modules/core/02-client/migrations/v7" clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" @@ -73,6 +74,7 @@ func codecAndEncodingConfig() (*codec.ProtoCodec, testutil.TestEncodingConfig) { wasmtypes.RegisterInterfaces(cfg.InterfaceRegistry) channeltypesv2.RegisterInterfaces(cfg.InterfaceRegistry) packetforwardtypes.RegisterInterfaces(cfg.InterfaceRegistry) + ratelimitingtypes.RegisterInterfaces(cfg.InterfaceRegistry) // all other types upgradetypes.RegisterInterfaces(cfg.InterfaceRegistry) diff --git a/go.mod b/go.mod index 02983c01ee8..5d2cc081933 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect cloud.google.com/go/storage v1.49.0 // indirect - cosmossdk.io/collections v1.3.0 // indirect + cosmossdk.io/collections v1.3.1 // indirect cosmossdk.io/depinject v1.2.1 // indirect cosmossdk.io/schema v1.1.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect @@ -95,7 +95,7 @@ require ( github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.32.0 // indirect + github.com/getsentry/sentry-go v0.33.0 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect diff --git a/go.sum b/go.sum index 973abee3c59..bbc48526c8d 100644 --- a/go.sum +++ b/go.sum @@ -616,8 +616,8 @@ cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT 
cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= -cosmossdk.io/collections v1.3.0 h1:RUY23xXBy/bu5oSHZ5y+mkJRyA4ZboKDO4Yvx4+g2uc= -cosmossdk.io/collections v1.3.0/go.mod h1:cqVpBMDGEYhuNmNSXIOmqpnQ7Eav43hpJIetzLuEkns= +cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= +cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= @@ -909,8 +909,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= -github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= +github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= diff --git a/modules/apps/packet-forward-middleware/keeper/keeper.go b/modules/apps/packet-forward-middleware/keeper/keeper.go index 73ab98adabd..e702b48ef90 100644 --- 
a/modules/apps/packet-forward-middleware/keeper/keeper.go +++ b/modules/apps/packet-forward-middleware/keeper/keeper.go @@ -74,6 +74,16 @@ func (k *Keeper) SetTransferKeeper(transferKeeper types.TransferKeeper) { k.transferKeeper = transferKeeper } +// SetICS4Wrapper sets the ICS4 wrapper. +func (k *Keeper) SetICS4Wrapper(ics4Wrapper porttypes.ICS4Wrapper) { + k.ics4Wrapper = ics4Wrapper +} + +// ICS4Wrapper gets the ICS4 Wrapper for PFM. +func (k *Keeper) ICS4Wrapper() porttypes.ICS4Wrapper { + return k.ics4Wrapper +} + // Logger returns a module-specific logger. func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+ibcexported.ModuleName+"-"+types.ModuleName) diff --git a/modules/apps/rate-limiting/client/cli/cli.go b/modules/apps/rate-limiting/client/cli/cli.go new file mode 100644 index 00000000000..c9a614c84ac --- /dev/null +++ b/modules/apps/rate-limiting/client/cli/cli.go @@ -0,0 +1,27 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" +) + +// GetQueryCmd returns the cli query commands for this module. 
+func GetQueryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "ratelimiting", + Short: "IBC ratelimiting querying subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetCmdQueryRateLimit(), + GetCmdQueryAllRateLimits(), + GetCmdQueryRateLimitsByChainID(), + GetCmdQueryAllBlacklistedDenoms(), + GetCmdQueryAllWhitelistedAddresses(), + ) + return cmd +} diff --git a/modules/apps/rate-limiting/client/cli/query.go b/modules/apps/rate-limiting/client/cli/query.go new file mode 100644 index 00000000000..702b451d017 --- /dev/null +++ b/modules/apps/rate-limiting/client/cli/query.go @@ -0,0 +1,196 @@ +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +const ( + FlagDenom = "denom" +) + +// GetCmdQueryRateLimit implements a command to query rate limits by channel-id or client-id and denom +func GetCmdQueryRateLimit() *cobra.Command { + cmd := &cobra.Command{ + Use: "rate-limit [channel-or-client-id]", + Short: "Query rate limits from a given channel-id/client-id and denom", + Long: strings.TrimSpace( + fmt.Sprintf(`Query rate limits from a given channel-id/client-id and denom. +If the denom flag is omitted, all rate limits for the given channel-id/client-id are returned. 
+ +Example: + $ %s query %s rate-limit [channel-or-client-id] + $ %s query %s rate-limit [channel-or-client-id] --denom=[denom] +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + channelOrClientID := args[0] + denom, err := cmd.Flags().GetString(FlagDenom) + if err != nil { + return err + } + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + // Query all rate limits for the channel/client ID if denom is not specified. + if denom == "" { + req := &types.QueryRateLimitsByChannelOrClientIDRequest{ + ChannelOrClientId: channelOrClientID, + } + res, err := queryClient.RateLimitsByChannelOrClientID(context.Background(), req) + if err != nil { + return err + } + return clientCtx.PrintProto(res) + } + + // Query specific rate limit if denom is provided + req := &types.QueryRateLimitRequest{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + } + res, err := queryClient.RateLimit(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res.RateLimit) + }, + } + + cmd.Flags().String(FlagDenom, "", "The denom identifying a specific rate limit") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryAllRateLimits return all available rate limits. 
+func GetCmdQueryAllRateLimits() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-rate-limits", + Short: "Query for all rate limits", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllRateLimitsRequest{} + res, err := queryClient.AllRateLimits(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryRateLimitsByChainID return all rate limits that exist between this chain +// and the specified ChainId +func GetCmdQueryRateLimitsByChainID() *cobra.Command { + cmd := &cobra.Command{ + Use: "rate-limits-by-chain [chain-id]", + Short: "Query for all rate limits associated with the channels/clients connecting to the given ChainID", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + chainID := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryRateLimitsByChainIDRequest{ + ChainId: chainID, + } + res, err := queryClient.RateLimitsByChainID(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryAllBlacklistedDenoms returns the command to query all blacklisted denoms +func GetCmdQueryAllBlacklistedDenoms() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-blacklisted-denoms", + Short: "Query for all blacklisted denoms", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllBlacklistedDenomsRequest{} + 
res, err := queryClient.AllBlacklistedDenoms(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetCmdQueryAllWhitelistedAddresses returns the command to query all whitelisted address pairs +func GetCmdQueryAllWhitelistedAddresses() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-whitelisted-addresses", + Short: "Query for all whitelisted address pairs", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllWhitelistedAddressesRequest{} + res, err := queryClient.AllWhitelistedAddresses(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} diff --git a/modules/apps/rate-limiting/doc.go b/modules/apps/rate-limiting/doc.go new file mode 100644 index 00000000000..75c737aaa3a --- /dev/null +++ b/modules/apps/rate-limiting/doc.go @@ -0,0 +1,8 @@ +/* +Package rate-limiting implements a middleware to rate limit IBC transfers +between different chains to prevent excessive token flow in either direction. +This module monitors and enforces configurable rate limits on token transfers +across IBC channels to protect chains from economic attacks or unintended +token drainage. 
+*/ +package ratelimiting diff --git a/modules/apps/rate-limiting/ibc_middleware.go b/modules/apps/rate-limiting/ibc_middleware.go new file mode 100644 index 00000000000..be3d35679d9 --- /dev/null +++ b/modules/apps/rate-limiting/ibc_middleware.go @@ -0,0 +1,132 @@ +package ratelimiting + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channelkeeper "github.com/cosmos/ibc-go/v10/modules/core/04-channel/keeper" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +var ( + _ porttypes.Middleware = (*IBCMiddleware)(nil) + _ porttypes.PacketUnmarshalarModule = (*IBCMiddleware)(nil) +) + +// IBCMiddleware implements the ICS26 callbacks for the rate-limiting middleware. +type IBCMiddleware struct { + app porttypes.PacketUnmarshalarModule + keeper keeper.Keeper + ics4Wrapper porttypes.ICS4Wrapper +} + +// NewIBCMiddleware creates a new IBCMiddleware given the keeper, underlying application, and channel keeper. +func NewIBCMiddleware(app porttypes.PacketUnmarshalarModule, k keeper.Keeper, ck *channelkeeper.Keeper) IBCMiddleware { + return IBCMiddleware{ + app: app, + keeper: k, + ics4Wrapper: ck, + } +} + +// OnChanOpenInit implements the IBCMiddleware interface. Call underlying app's OnChanOpenInit. +func (im IBCMiddleware) OnChanOpenInit(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID string, channelID string, counterparty channeltypes.Counterparty, version string) (string, error) { + return im.app.OnChanOpenInit(ctx, order, connectionHops, portID, channelID, counterparty, version) +} + +// OnChanOpenTry implements the IBCMiddleware interface. Call underlying app's OnChanOpenTry. 
+func (im IBCMiddleware) OnChanOpenTry(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, counterpartyVersion string) (string, error) { + return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, counterparty, counterpartyVersion) +} + +// OnChanOpenAck implements the IBCMiddleware interface. Call underlying app's OnChanOpenAck. +func (im IBCMiddleware) OnChanOpenAck(ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string) error { + return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) +} + +// OnChanOpenConfirm implements the IBCMiddleware interface. Call underlying app's OnChanOpenConfirm. +func (im IBCMiddleware) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanOpenConfirm(ctx, portID, channelID) +} + +// OnChanCloseInit implements the IBCMiddleware interface. Call underlying app's OnChanCloseInit. +func (im IBCMiddleware) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanCloseInit(ctx, portID, channelID) +} + +// OnChanCloseConfirm implements the IBCMiddleware interface. Call underlying app's OnChanCloseConfirm. +func (im IBCMiddleware) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanCloseConfirm(ctx, portID, channelID) +} + +// OnRecvPacket implements the IBCMiddleware interface. +// Rate limits the incoming packet. If the packet is allowed, call underlying app's OnRecvPacket. 
+func (im IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("Receive packet rate limited", "error", err) + return channeltypes.NewErrorAcknowledgement(err) + } + + // If the packet was not rate-limited, pass it down to the underlying app's OnRecvPacket callback + return im.app.OnRecvPacket(ctx, channelVersion, packet, relayer) +} + +// OnAcknowledgementPacket implements the IBCMiddleware interface. +// If the acknowledgement was an error, revert the outflow amount. +// Then, call underlying app's OnAcknowledgementPacket. +func (im IBCMiddleware) OnAcknowledgementPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress) error { + if err := im.keeper.AcknowledgeRateLimitedPacket(ctx, packet, acknowledgement); err != nil { + im.keeper.Logger(ctx).Error("Rate limit OnAcknowledgementPacket failed", "error", err) + } + + return im.app.OnAcknowledgementPacket(ctx, channelVersion, packet, acknowledgement, relayer) +} + +// OnTimeoutPacket implements the IBCMiddleware interface. +// Revert the outflow amount. Then, call underlying app's OnTimeoutPacket. +func (im IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) error { + if err := im.keeper.TimeoutRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("Rate limit OnTimeoutPacket failed", "error", err) + } + + return im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer) +} + +// SendPacket implements the ICS4 Wrapper interface. +// It calls the keeper's SendRateLimitedPacket function first to check the rate limit. +// If the packet is allowed, it then calls the underlying ICS4Wrapper SendPacket. 
+func (im IBCMiddleware) SendPacket(ctx sdk.Context, sourcePort string, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) (uint64, error) { + err := im.keeper.SendRateLimitedPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 packet send was denied by rate limiter", "error", err) + return 0, err + } + + seq, err := im.ics4Wrapper.SendPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) + if err != nil { + return 0, err + } + + return seq, nil +} + +// WriteAcknowledgement implements the ICS4 Wrapper interface. +// It calls the underlying ICS4Wrapper. +func (im IBCMiddleware) WriteAcknowledgement(ctx sdk.Context, packet ibcexported.PacketI, ack ibcexported.Acknowledgement) error { + return im.ics4Wrapper.WriteAcknowledgement(ctx, packet, ack) +} + +// GetAppVersion implements the ICS4 Wrapper interface. +// It calls the underlying ICS4Wrapper. +func (im IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return im.ics4Wrapper.GetAppVersion(ctx, portID, channelID) +} + +// UnmarshalPacketData implements the PacketDataUnmarshaler interface. +// It defers to the underlying app to unmarshal the packet data. 
+func (im IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { + return im.app.UnmarshalPacketData(ctx, portID, channelID, bz) +} diff --git a/modules/apps/rate-limiting/keeper/abci.go b/modules/apps/rate-limiting/keeper/abci.go new file mode 100644 index 00000000000..702d0c3c17b --- /dev/null +++ b/modules/apps/rate-limiting/keeper/abci.go @@ -0,0 +1,26 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Before each hour epoch, check if any of the rate limits have expired, +// and reset them if they have +func (k Keeper) BeginBlocker(ctx sdk.Context) { + epochStarting, epochNumber, err := k.CheckHourEpochStarting(ctx) + if err != nil { + k.Logger(ctx).Error("BeginBlocker", "error", err) + return + } + if !epochStarting { + return + } + for _, rateLimit := range k.GetAllRateLimits(ctx) { + if rateLimit.Quota.DurationHours == 0 || epochNumber%rateLimit.Quota.DurationHours != 0 { + continue + } + if err := k.ResetRateLimit(ctx, rateLimit.Path.Denom, rateLimit.Path.ChannelOrClientId); err != nil { + k.Logger(ctx).Error("Unable to reset quota", "Denom", rateLimit.Path.Denom, "ChannelOrClientId", rateLimit.Path.ChannelOrClientId, "error", err) + } + } +} diff --git a/modules/apps/rate-limiting/keeper/abci_test.go b/modules/apps/rate-limiting/keeper/abci_test.go new file mode 100644 index 00000000000..d97545d0673 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/abci_test.go @@ -0,0 +1,100 @@ +package keeper_test + +import ( + "fmt" + "time" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Store a rate limit with a non-zero flow for each duration +func (s *KeeperTestSuite) resetRateLimits(denom string, durations []uint64, nonZeroFlow int64) { + // Add/reset rate limit with a quota duration hours for each duration in the list + for i, duration := range durations { + channelID := fmt.Sprintf("channel-%d", i) + + 
s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelOrClientId: channelID, + }, + Quota: &types.Quota{ + DurationHours: duration, + }, + Flow: &types.Flow{ + Inflow: sdkmath.NewInt(nonZeroFlow), + Outflow: sdkmath.NewInt(nonZeroFlow), + ChannelValue: sdkmath.NewInt(100), + }, + }) + } +} + +func (s *KeeperTestSuite) TestBeginBlocker_NoPanic() { + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), types.HourEpoch{ + Duration: 0, + }) + s.Require().NoError(err) + s.Require().NotPanics(func() { + s.chainA.GetSimApp().RateLimitKeeper.BeginBlocker(s.chainA.GetContext()) + }) +} + +func (s *KeeperTestSuite) TestBeginBlocker_ReturnsWhenEpochInPast() { + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), types.HourEpoch{ + Duration: time.Minute, + EpochStartTime: time.Now().Add(time.Hour * -1), + }) + s.Require().NoError(err) + s.Require().NotPanics(func() { + s.chainA.GetSimApp().RateLimitKeeper.BeginBlocker(s.chainA.GetContext()) + }) +} + +func (s *KeeperTestSuite) TestBeginBlocker() { + // We'll create three rate limits with different durations + // And then pass in epoch ids that will cause each to trigger a reset in order + // i.e. 
epochId 2 will only cause duration 2 to trigger (2 % 2 == 0; and 2 % 3 != 0; 2 % 5 != 0),
+	// epochId 9, will only cause duration 3 to trigger (9 % 2 != 0; and 9 % 3 == 0; 9 % 5 != 0)
+	// epochId 25, will only cause duration 5 to trigger (25 % 2 != 0; and 25 % 3 != 0; 25 % 5 == 0)
+	durations := []uint64{2, 3, 5}
+	epochIDs := []uint64{2, 9, 25}
+	nonZeroFlow := int64(10)
+
+	blockTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
+	s.coordinator.SetTime(blockTime)
+
+	for i, epochID := range epochIDs {
+		// First reset the rate limits so they have a non-zero flow
+		s.resetRateLimits(denom, durations, nonZeroFlow)
+
+		duration := durations[i]
+		channelIDFromResetRateLimit := fmt.Sprintf("channel-%d", i)
+
+		// Setup epochs so that the hook triggers
+		// (epoch start time + duration must be before block time)
+		err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), types.HourEpoch{
+			EpochNumber:    epochID - 1,
+			Duration:       time.Minute,
+			EpochStartTime: blockTime.Add(-2 * time.Minute),
+		})
+		s.Require().NoError(err)
+		s.chainA.GetSimApp().RateLimitKeeper.BeginBlocker(s.chainA.GetContext())
+
+		// Check rate limits (only one rate limit should reset for each hook trigger)
+		rateLimits := s.chainA.GetSimApp().RateLimitKeeper.GetAllRateLimits(s.chainA.GetContext())
+		for _, rateLimit := range rateLimits {
+			context := fmt.Sprintf("duration: %d, epoch: %d", duration, epochID)
+
+			if rateLimit.Path.ChannelOrClientId == channelIDFromResetRateLimit {
+				s.Require().Equal(int64(0), rateLimit.Flow.Inflow.Int64(), "inflow was not reset to 0 - %s", context)
+				s.Require().Equal(int64(0), rateLimit.Flow.Outflow.Int64(), "outflow was not reset to 0 - %s", context)
+			} else {
+				s.Require().Equal(nonZeroFlow, rateLimit.Flow.Inflow.Int64(), "inflow should have been left unchanged - %s", context)
+				s.Require().Equal(nonZeroFlow, rateLimit.Flow.Outflow.Int64(), "outflow should have been left unchanged - %s", context)
+			}
+		}
+	}
+}
diff --git 
a/modules/apps/rate-limiting/keeper/blacklist.go b/modules/apps/rate-limiting/keeper/blacklist.go new file mode 100644 index 00000000000..e539412ad6d --- /dev/null +++ b/modules/apps/rate-limiting/keeper/blacklist.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Adds a denom to a blacklist to prevent all IBC transfers with that denom +func (k Keeper) AddDenomToBlacklist(ctx sdk.Context, denom string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + key := []byte(denom) + store.Set(key, []byte{1}) +} + +// Removes a denom from a blacklist to re-enable IBC transfers for that denom +func (k Keeper) RemoveDenomFromBlacklist(ctx sdk.Context, denom string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + key := []byte(denom) + store.Delete(key) +} + +// Check if a denom is currently blacklisted +func (k Keeper) IsDenomBlacklisted(ctx sdk.Context, denom string) bool { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + + key := []byte(denom) + value := store.Get(key) + found := len(value) != 0 + + return found +} + +// Get all the blacklisted denoms +func (k Keeper) GetAllBlacklistedDenoms(ctx sdk.Context) []string { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allBlacklistedDenoms := []string{} + for ; iterator.Valid(); iterator.Next() { + allBlacklistedDenoms = append(allBlacklistedDenoms, string(iterator.Key())) + } + + return allBlacklistedDenoms +} diff 
--git a/modules/apps/rate-limiting/keeper/blacklist_test.go b/modules/apps/rate-limiting/keeper/blacklist_test.go new file mode 100644 index 00000000000..358c541b973 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/blacklist_test.go @@ -0,0 +1,47 @@ +package keeper_test + +import "slices" + +func (s *KeeperTestSuite) TestDenomBlacklist() { + allDenoms := []string{"denom1", "denom2", "denom3", "denom4"} + denomsToBlacklist := []string{"denom1", "denom3"} + + // No denoms are currently blacklisted + for _, denom := range allDenoms { + isBlacklisted := s.chainA.GetSimApp().RateLimitKeeper.IsDenomBlacklisted(s.chainA.GetContext(), denom) + s.Require().False(isBlacklisted, "%s should not be blacklisted yet", denom) + } + + // Blacklist two denoms + for _, denom := range denomsToBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), denom) + } + + // Confirm half the list was blacklisted and the others were not + for _, denom := range allDenoms { + isBlacklisted := s.chainA.GetSimApp().RateLimitKeeper.IsDenomBlacklisted(s.chainA.GetContext(), denom) + + if slices.Contains(denomsToBlacklist, denom) { + s.Require().True(isBlacklisted, "%s should have been blacklisted", denom) + continue + } + s.Require().False(isBlacklisted, "%s should not have been blacklisted", denom) + } + actualBlacklistedDenoms := s.chainA.GetSimApp().RateLimitKeeper.GetAllBlacklistedDenoms(s.chainA.GetContext()) + s.Require().Len(actualBlacklistedDenoms, len(denomsToBlacklist), "number of blacklisted denoms") + s.Require().ElementsMatch(denomsToBlacklist, actualBlacklistedDenoms, "list of blacklisted denoms") + + // Finally, remove denoms from blacklist and confirm they were removed + for _, denom := range denomsToBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveDenomFromBlacklist(s.chainA.GetContext(), denom) + } + for _, denom := range allDenoms { + isBlacklisted := s.chainA.GetSimApp().RateLimitKeeper.IsDenomBlacklisted(s.chainA.GetContext(), 
denom) + + if slices.Contains(denomsToBlacklist, denom) { + s.Require().False(isBlacklisted, "%s should have been removed from the blacklist", denom) + continue + } + s.Require().False(isBlacklisted, "%s should never have been blacklisted", denom) + } +} diff --git a/modules/apps/rate-limiting/keeper/epoch.go b/modules/apps/rate-limiting/keeper/epoch.go new file mode 100644 index 00000000000..884cfc4d403 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/epoch.go @@ -0,0 +1,72 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Stores the hour epoch +func (k Keeper) SetHourEpoch(ctx sdk.Context, epoch types.HourEpoch) error { + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + epochBz, err := k.cdc.Marshal(&epoch) + if err != nil { + return err + } + store.Set(types.HourEpochKey, epochBz) + return nil +} + +// Reads the hour epoch from the store +// Returns a zero-value epoch and logs an error if the epoch is not found or fails to unmarshal. +func (k Keeper) GetHourEpoch(ctx sdk.Context) (types.HourEpoch, error) { + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + + var epoch types.HourEpoch + epochBz := store.Get(types.HourEpochKey) + if len(epochBz) == 0 { + return types.HourEpoch{}, types.ErrEpochNotFound + } + + if err := k.cdc.Unmarshal(epochBz, &epoch); err != nil { + return types.HourEpoch{}, errorsmod.Wrapf(types.ErrUnmarshalEpoch, "error: %s", err.Error()) + } + + return epoch, nil +} + +// Checks if it's time to start the new hour epoch. +// This function returns epochStarting, epochNumber and a possible error. 
+func (k Keeper) CheckHourEpochStarting(ctx sdk.Context) (bool, uint64, error) { + hourEpoch, err := k.GetHourEpoch(ctx) + if err != nil { + return false, 0, err + } + + // If GetHourEpoch returned a zero-value epoch (due to error or missing key), + // we cannot proceed with the check. + if hourEpoch.Duration == 0 || hourEpoch.EpochStartTime.IsZero() { + return false, 0, errorsmod.Wrapf(types.ErrInvalidEpoce, "cannot check hour epoch starting. epoch: %v", hourEpoch) + } + + // If the block time is later than the current epoch start time + epoch duration, + // move onto the next epoch by incrementing the epoch number, height, and start time + currentEpochEndTime := hourEpoch.EpochStartTime.Add(hourEpoch.Duration) + shouldNextEpochStart := ctx.BlockTime().After(currentEpochEndTime) + if shouldNextEpochStart { + hourEpoch.EpochNumber++ + hourEpoch.EpochStartTime = currentEpochEndTime + hourEpoch.EpochStartHeight = ctx.BlockHeight() + + if err := k.SetHourEpoch(ctx, hourEpoch); err != nil { + return false, 0, err + } + return true, hourEpoch.EpochNumber, nil + } + + // Otherwise, indicate that a new epoch is not starting + return false, 0, nil +} diff --git a/modules/apps/rate-limiting/keeper/epoch_test.go b/modules/apps/rate-limiting/keeper/epoch_test.go new file mode 100644 index 00000000000..a648adff685 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/epoch_test.go @@ -0,0 +1,131 @@ +package keeper_test + +import ( + "time" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Tests Get/Set Hour epoch +func (s *KeeperTestSuite) TestHourEpoch() { + expectedHourEpoch := types.HourEpoch{ + Duration: time.Hour, + EpochNumber: 1, + EpochStartTime: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + EpochStartHeight: 10, + } + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), expectedHourEpoch) + s.Require().NoError(err) + + actualHourEpoch, err := s.chainA.GetSimApp().RateLimitKeeper.GetHourEpoch(s.chainA.GetContext()) 
+ s.Require().NoError(err) + s.Require().Equal(expectedHourEpoch, actualHourEpoch, "hour epoch") +} + +func (s *KeeperTestSuite) TestCheckHourEpochStarting() { + epochStartTime := time.Date(2024, 1, 1, 0, 0, 1, 0, time.UTC) + blockHeight := int64(10) + duration := time.Minute + + initialEpoch := types.HourEpoch{ + EpochNumber: 10, + EpochStartTime: epochStartTime, + Duration: duration, + } + nextEpoch := types.HourEpoch{ + EpochNumber: initialEpoch.EpochNumber + 1, // epoch number increments + EpochStartTime: epochStartTime.Add(duration), // start time increments by duration + EpochStartHeight: blockHeight, // height gets current block height + Duration: duration, + } + + testCases := []struct { + name string + blockTime time.Time + expectedEpochStarting bool + initialEpoch types.HourEpoch + err error + }{ + { + name: "in middle of epoch", + blockTime: epochStartTime.Add(duration / 2), // halfway through epoch + expectedEpochStarting: false, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "right before epoch boundary", + blockTime: epochStartTime.Add(duration).Add(-1 * time.Second), // 1 second before epoch + expectedEpochStarting: false, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "at epoch boundary", + blockTime: epochStartTime.Add(duration), // at epoch boundary + expectedEpochStarting: false, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "right after epoch boundary", + blockTime: epochStartTime.Add(duration).Add(time.Second), // one second after epoch boundary + expectedEpochStarting: true, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "in middle of next epoch", + blockTime: epochStartTime.Add(duration).Add(duration / 2), // halfway through next epoch + expectedEpochStarting: true, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "next epoch skipped", + blockTime: epochStartTime.Add(duration * 10), // way after next epoch (still increments only once) + expectedEpochStarting: true, + initialEpoch: 
initialEpoch, + err: nil, + }, + { + name: "error - invalid epoch", + blockTime: epochStartTime.Add(duration * 10), // way after next epoch (still increments only once) + expectedEpochStarting: true, + initialEpoch: types.HourEpoch{ + Duration: 0, + }, + err: types.ErrInvalidEpoce, + }, + } + + // Set the block height to blockHeight + s.coordinator.CommitNBlocks(s.chainA, uint64(blockHeight)-uint64(s.chainA.App.LastBlockHeight()+1)) + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.coordinator.SetTime(tc.blockTime) + + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), tc.initialEpoch) + s.Require().NoError(err) + + actualStarting, actualEpochNumber, err := s.chainA.GetSimApp().RateLimitKeeper.CheckHourEpochStarting(s.chainA.GetContext()) + if tc.err != nil { + s.Require().ErrorIs(err, tc.err) + return + } + s.Require().NoError(err) + s.Require().Equal(tc.expectedEpochStarting, actualStarting, "epoch starting") + + expectedEpoch := tc.initialEpoch + if tc.expectedEpochStarting { + expectedEpoch = nextEpoch + s.Require().Equal(expectedEpoch.EpochNumber, actualEpochNumber, "epoch number") + } + + actualHourEpoch, err := s.chainA.GetSimApp().RateLimitKeeper.GetHourEpoch(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().Equal(expectedEpoch, actualHourEpoch, "hour epoch") + }) + } +} diff --git a/modules/apps/rate-limiting/keeper/events.go b/modules/apps/rate-limiting/keeper/events.go new file mode 100644 index 00000000000..f37b0356a5f --- /dev/null +++ b/modules/apps/rate-limiting/keeper/events.go @@ -0,0 +1,27 @@ +package keeper + +import ( + "strings" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// If the rate limit is exceeded or the denom is blacklisted, we emit an event +func EmitTransferDeniedEvent(ctx sdk.Context, reason, denom, channelOrClientID string, direction types.PacketDirection, amount 
sdkmath.Int, err error) { + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTransferDenied, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReason, reason), + sdk.NewAttribute(types.AttributeKeyAction, strings.ToLower(direction.String())), // packet_send or packet_recv + sdk.NewAttribute(types.AttributeKeyDenom, denom), + sdk.NewAttribute(types.AttributeKeyChannelOrClient, channelOrClientID), + sdk.NewAttribute(types.AttributeKeyAmount, amount.String()), + sdk.NewAttribute(types.AttributeKeyError, err.Error()), + ), + ) +} diff --git a/modules/apps/rate-limiting/keeper/flow.go b/modules/apps/rate-limiting/keeper/flow.go new file mode 100644 index 00000000000..dc53841295c --- /dev/null +++ b/modules/apps/rate-limiting/keeper/flow.go @@ -0,0 +1,74 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// The total value on a given path (aka, the denominator in the percentage calculation) +// is the total supply of the given denom +func (k Keeper) GetChannelValue(ctx sdk.Context, denom string) sdkmath.Int { + return k.bankKeeper.GetSupply(ctx, denom).Amount +} + +// CheckRateLimitAndUpdateFlow checks whether the given packet will exceed the rate limit. 
+// Called by OnRecvPacket and OnSendPacket +func (k Keeper) CheckRateLimitAndUpdateFlow(ctx sdk.Context, direction types.PacketDirection, packetInfo RateLimitedPacketInfo) (updatedFlow bool, err error) { + denom := packetInfo.Denom + channelOrClientID := packetInfo.ChannelID + amount := packetInfo.Amount + + // First check if the denom is blacklisted + if k.IsDenomBlacklisted(ctx, denom) { + err := errorsmod.Wrapf(types.ErrDenomIsBlacklisted, "denom %s is blacklisted", denom) + EmitTransferDeniedEvent(ctx, types.EventBlacklistedDenom, denom, channelOrClientID, direction, amount, err) + return false, err + } + + // If there's no rate limit yet for this denom, no action is necessary + rateLimit, found := k.GetRateLimit(ctx, denom, channelOrClientID) + if !found { + return false, nil + } + + // Check if the sender/receiver pair is whitelisted + // If so, return a success without modifying the quota + if k.IsAddressPairWhitelisted(ctx, packetInfo.Sender, packetInfo.Receiver) { + return false, nil + } + + // Update the flow object with the change in amount + if err := rateLimit.UpdateFlow(direction, amount); err != nil { + // If the rate limit was exceeded, emit an event + EmitTransferDeniedEvent(ctx, types.EventRateLimitExceeded, denom, channelOrClientID, direction, amount, err) + return false, err + } + + // If there's no quota error, update the rate limit object in the store with the new flow + k.SetRateLimit(ctx, rateLimit) + + return true, nil +} + +// If a SendPacket fails or times out, undo the outflow increment that happened during the send +func (k Keeper) UndoSendPacket(ctx sdk.Context, channelOrClientID string, sequence uint64, denom string, amount sdkmath.Int) error { + rateLimit, found := k.GetRateLimit(ctx, denom, channelOrClientID) + if !found { + return nil + } + + // If the packet was sent during this quota, decrement the outflow + // Otherwise, it can be ignored + if k.CheckPacketSentDuringCurrentQuota(ctx, channelOrClientID, sequence) { + 
rateLimit.Flow.Outflow = rateLimit.Flow.Outflow.Sub(amount) + k.SetRateLimit(ctx, rateLimit) + + k.RemovePendingSendPacket(ctx, channelOrClientID, sequence) + } + + return nil +} diff --git a/modules/apps/rate-limiting/keeper/flow_test.go b/modules/apps/rate-limiting/keeper/flow_test.go new file mode 100644 index 00000000000..26d22f4f80e --- /dev/null +++ b/modules/apps/rate-limiting/keeper/flow_test.go @@ -0,0 +1,449 @@ +package keeper_test + +import ( + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +type action struct { + direction types.PacketDirection + amount int64 + addToBlacklist bool + removeFromBlacklist bool + addToWhitelist bool + removeFromWhitelist bool + skipFlowUpdate bool + expectedError string +} + +type checkRateLimitTestCase struct { + name string + actions []action +} + +func (s *KeeperTestSuite) TestGetChannelValue() { + supply := sdkmath.NewInt(100) + + // Mint coins to increase the supply, which will increase the channel value + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(denom, supply))) + s.Require().NoError(err) + + expected := supply + actual := s.chainA.GetSimApp().RateLimitKeeper.GetChannelValue(s.chainA.GetContext(), denom) + s.Require().Equal(expected, actual) +} + +// Adds a rate limit object to the store in preparation for the check rate limit tests +func (s *KeeperTestSuite) SetupCheckRateLimitAndUpdateFlowTest() { + channelValue := sdkmath.NewInt(100) + maxPercentSend := sdkmath.NewInt(10) + maxPercentRecv := sdkmath.NewInt(10) + + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelOrClientId: channelID, + }, + Quota: &types.Quota{ + MaxPercentSend: 
maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: 1, + }, + Flow: &types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + }, + }) + + s.chainA.GetSimApp().RateLimitKeeper.RemoveDenomFromBlacklist(s.chainA.GetContext(), denom) + s.chainA.GetSimApp().RateLimitKeeper.RemoveWhitelistedAddressPair(s.chainA.GetContext(), sender, receiver) +} + +// Helper function to check the rate limit across a series of transfers +func (s *KeeperTestSuite) processCheckRateLimitAndUpdateFlowTestCase(tc checkRateLimitTestCase) { + s.SetupCheckRateLimitAndUpdateFlowTest() + + expectedInflow := sdkmath.NewInt(0) + expectedOutflow := sdkmath.NewInt(0) + for i, action := range tc.actions { + if action.addToBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), denom) + continue + } + + if action.removeFromBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveDenomFromBlacklist(s.chainA.GetContext(), denom) + continue + } + + if action.addToWhitelist { + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), types.WhitelistedAddressPair{ + Sender: sender, + Receiver: receiver, + }) + continue + } + + if action.removeFromWhitelist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveWhitelistedAddressPair(s.chainA.GetContext(), sender, receiver) + continue + } + + amount := sdkmath.NewInt(action.amount) + packetInfo := keeper.RateLimitedPacketInfo{ + ChannelID: channelID, + Denom: denom, + Amount: amount, + Sender: sender, + Receiver: receiver, + } + updatedFlow, err := s.chainA.GetSimApp().RateLimitKeeper.CheckRateLimitAndUpdateFlow(s.chainA.GetContext(), action.direction, packetInfo) + + // Each action optionally errors or skips a flow update + if action.expectedError != "" { + s.Require().ErrorContains(err, action.expectedError, tc.name+" - action: #%d - error", i) + } else { + s.Require().NoError(err, tc.name+" - action: #%d - no error", i) + + 
expectedUpdateFlow := !action.skipFlowUpdate + s.Require().Equal(expectedUpdateFlow, updatedFlow, tc.name+" - action: #%d - updated flow", i) + + if expectedUpdateFlow { + if action.direction == types.PACKET_RECV { + expectedInflow = expectedInflow.Add(amount) + } else { + expectedOutflow = expectedOutflow.Add(amount) + } + } + } + + // Confirm flow is updated properly (or left as is if the theshold was exceeded) + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(expectedInflow.Int64(), rateLimit.Flow.Inflow.Int64(), tc.name+" - action: #%d - inflow", i) + s.Require().Equal(expectedOutflow.Int64(), rateLimit.Flow.Outflow.Int64(), tc.name+" - action: #%d - outflow", i) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdateFlow_UnidirectionalFlow() { + testCases := []checkRateLimitTestCase{ + { + name: "send_under_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 5}, + {direction: types.PACKET_SEND, amount: 5}, + }, + }, + { + name: "send_over_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 5}, + { + direction: types.PACKET_SEND, amount: 6, + expectedError: "Outflow exceeds quota", + }, + }, + }, + { + name: "recv_under_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 5}, + {direction: types.PACKET_RECV, amount: 5}, + }, + }, + { + name: "recv_over_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 5}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: "Inflow exceeds quota", + }, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_BidirectionalFlow() { + testCases := []checkRateLimitTestCase{ + { + name: "send_then_recv_under_threshold", + actions: []action{ + {direction: types.PACKET_SEND, 
amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + }, + }, + { + name: "recv_then_send_under_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + }, + }, + { + name: "send_then_recv_over_inflow", + actions: []action{ + {direction: types.PACKET_SEND, amount: 2}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 2}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 2}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: "Inflow exceeds quota", + }, + }, + }, + { + name: "send_then_recv_over_outflow", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_SEND, amount: 1, expectedError: "Outflow exceeds quota"}, + }, + }, + { + name: "recv_then_send_over_inflow", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 2}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_RECV, amount: 1, expectedError: "Inflow exceeds quota"}, + }, + }, + { + name: "recv_then_send_over_outflow", + actions: []action{ + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6, expectedError: "Outflow exceeds quota"}, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_DenomBlacklist() { + testCases := 
[]checkRateLimitTestCase{ + { + name: "add_then_remove_from_blacklist", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + {removeFromBlacklist: true}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + }, + }, + { + name: "send_recv_blacklist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_SEND, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + { + name: "send_recv_blacklist_recv", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + { + name: "recv_send_blacklist_send", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_SEND, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + { + name: "recv_send_blacklist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_AddressWhitelist() { + testCases := []checkRateLimitTestCase{ + { + name: "send_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: 
"recv_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "send_send_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_SEND, amount: 6, expectedError: "Outflow exceeds quota"}, + {addToWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "recv_recv_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_RECV, amount: 6, expectedError: "Inflow exceeds quota"}, + {addToWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "send_recv_send_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "recv_send_recv_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "add_then_remove_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {addToWhitelist: true}, + {removeFromWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, expectedError: "Inflow exceeds quota"}, + }, + }, + { + name: "add_then_remove_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {addToWhitelist: true}, + {removeFromWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, expectedError: "Outflow exceeds quota"}, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + 
s.processCheckRateLimitAndUpdateFlowTestCase(tc)
+		})
+	}
+}
+
+func (s *KeeperTestSuite) TestUndoSendPacket() {
+	// Helper function to check the rate limit outflow amount
+	checkOutflow := func(channelId, denom string, expectedAmount sdkmath.Int) {
+		rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelId)
+		s.Require().True(found, "rate limit should have been found")
+		s.Require().Equal(expectedAmount.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow - channel: %s, denom: %s", channelId, denom)
+	}
+
+	// Create two rate limits
+	initialOutflow := sdkmath.NewInt(100)
+	packetSendAmount := sdkmath.NewInt(10)
+	rateLimit1 := types.RateLimit{
+		Path: &types.Path{Denom: denom, ChannelOrClientId: channelID},
+		Flow: &types.Flow{Outflow: initialOutflow},
+	}
+	rateLimit2 := types.RateLimit{
+		Path: &types.Path{Denom: "different-denom", ChannelOrClientId: "different-channel"},
+		Flow: &types.Flow{Outflow: initialOutflow},
+	}
+	s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit1)
+	s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit2)
+
+	// Store a pending packet sequence number of 2 for the first rate limit
+	s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), channelID, 2)
+
+	// Undo a send of 10 from the first rate limit, with sequence 1
+	// It should NOT modify the outflow since sequence 1 was not sent in the current quota
+	err := s.chainA.GetSimApp().RateLimitKeeper.UndoSendPacket(s.chainA.GetContext(), channelID, 1, denom, packetSendAmount)
+	s.Require().NoError(err, "no error expected when undoing send packet sequence 1")
+
+	checkOutflow(channelID, denom, initialOutflow)
+
+	// Now undo a send from the same rate limit with sequence 2
+	// It should decrement the outflow since 2 is in the current quota
+	err = s.chainA.GetSimApp().RateLimitKeeper.UndoSendPacket(s.chainA.GetContext(), channelID, 2, denom, 
packetSendAmount) + s.Require().NoError(err, "no error expected when undoing send packet sequence 2") + + checkOutflow(channelID, denom, initialOutflow.Sub(packetSendAmount)) + + // Confirm the outflow of the second rate limit has not been touched + checkOutflow("different-channel", "different-denom", initialOutflow) + + // Confirm sequence number was removed + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, 2) + s.Require().False(found, "packet sequence number should have been removed") +} diff --git a/modules/apps/rate-limiting/keeper/genesis.go b/modules/apps/rate-limiting/keeper/genesis.go new file mode 100644 index 00000000000..9701fd7b627 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/genesis.go @@ -0,0 +1,65 @@ +package keeper + +import ( + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// InitGenesis initializes the rate-limiting module's state from a provided genesis state. 
+func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { + // Set rate limits, blacklists, and whitelists + for _, rateLimit := range state.RateLimits { + k.SetRateLimit(ctx, rateLimit) + } + for _, denom := range state.BlacklistedDenoms { + k.AddDenomToBlacklist(ctx, denom) + } + for _, addressPair := range state.WhitelistedAddressPairs { + k.SetWhitelistedAddressPair(ctx, addressPair) + } + + // Set pending sequence numbers - validating that they're in right format of {channelId}/{sequenceNumber} + for _, pendingPacketID := range state.PendingSendPacketSequenceNumbers { + channelOrClientID, sequence, err := types.ParsePendingPacketID(pendingPacketID) + if err != nil { + panic(err.Error()) + } + k.SetPendingSendPacket(ctx, channelOrClientID, sequence) + } + + // If the hour epoch has been initialized already (epoch number != 0), validate and then use it + if state.HourEpoch.EpochNumber > 0 { + if err := k.SetHourEpoch(ctx, state.HourEpoch); err != nil { + panic(err) + } + } else { + // If the hour epoch has not been initialized yet, set it so that the epoch number matches + // the current hour and the start time is precisely on the hour + state.HourEpoch.EpochNumber = uint64(ctx.BlockTime().Hour()) //nolint:gosec + state.HourEpoch.EpochStartTime = ctx.BlockTime().Truncate(time.Hour) + state.HourEpoch.EpochStartHeight = ctx.BlockHeight() + if err := k.SetHourEpoch(ctx, state.HourEpoch); err != nil { + panic(err) + } + } +} + +// ExportGenesis returns the rate-limiting module's exported genesis. 
+func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + rateLimits := k.GetAllRateLimits(ctx) + hourEpoch, err := k.GetHourEpoch(ctx) + if err != nil { + panic(err) + } + + return &types.GenesisState{ + RateLimits: rateLimits, + BlacklistedDenoms: k.GetAllBlacklistedDenoms(ctx), + WhitelistedAddressPairs: k.GetAllWhitelistedAddressPairs(ctx), + PendingSendPacketSequenceNumbers: k.GetAllPendingSendPackets(ctx), + HourEpoch: hourEpoch, + } +} diff --git a/modules/apps/rate-limiting/keeper/genesis_test.go b/modules/apps/rate-limiting/keeper/genesis_test.go new file mode 100644 index 00000000000..162c5057f77 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/genesis_test.go @@ -0,0 +1,110 @@ +package keeper_test + +import ( + "strconv" + "time" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func createRateLimits() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := int64(1); i <= 3; i++ { + suffix := strconv.Itoa(int(i)) + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom-" + suffix, ChannelOrClientId: "channel-" + suffix}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(i), MaxPercentRecv: sdkmath.NewInt(i), DurationHours: uint64(i)}, //nolint:gosec + Flow: &types.Flow{Inflow: sdkmath.NewInt(i), Outflow: sdkmath.NewInt(i), ChannelValue: sdkmath.NewInt(i)}, + } + + rateLimits = append(rateLimits, rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestGenesis() { + currentHour := 13 + blockTime := time.Date(2024, 1, 1, currentHour, 55, 8, 0, time.UTC) // 13:55:08 + blockHeight := int64(10) + + testCases := []struct { + name string + genesisState types.GenesisState + firstEpoch bool + panicError string + }{ + { + name: "valid default state", + genesisState: *types.DefaultGenesis(), + firstEpoch: true, + }, + { + name: "valid custom state", + genesisState: types.GenesisState{ + RateLimits: createRateLimits(), + WhitelistedAddressPairs: 
[]types.WhitelistedAddressPair{ + {Sender: "senderA", Receiver: "receiverA"}, + {Sender: "senderB", Receiver: "receiverB"}, + }, + BlacklistedDenoms: []string{"denomA", "denomB"}, + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2/3"}, + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartTime: blockTime, + Duration: time.Minute, + EpochStartHeight: 1, + }, + }, + firstEpoch: false, + }, + { + name: "invalid packet sequence - wrong delimiter", + genesisState: types.GenesisState{ + RateLimits: createRateLimits(), + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2|3"}, + }, + panicError: "invalid pending send packet (channel-2|3), must be of form: {channelId}/{sequenceNumber}", + }, + } + + // Establish base height and time before the loop + s.coordinator.CommitNBlocks(s.chainA, uint64(blockHeight-s.chainA.App.LastBlockHeight()+1)) + s.coordinator.SetTime(blockTime) + + for _, tc := range testCases { + s.Run(tc.name, func() { + if tc.panicError != "" { + s.Require().PanicsWithValue(tc.panicError, func() { + s.chainA.GetSimApp().RateLimitKeeper.InitGenesis(s.chainA.GetContext(), tc.genesisState) + }) + return + } + s.chainA.GetSimApp().RateLimitKeeper.InitGenesis(s.chainA.GetContext(), tc.genesisState) + + // If the hour epoch was not initialized in the raw genState, + // it will be initialized during InitGenesis + expectedGenesis := tc.genesisState + + // For the default genesis with firstEpoch=true, InitGenesis will set the HourEpoch fields + // based on the current block time and height + if tc.firstEpoch { + // Get the context to retrieve current height + ctx := s.chainA.GetContext() + + // For a new epoch, InitGenesis will: + // - Set EpochNumber to current hour (13 from blockTime) + // - Set EpochStartTime to the truncated hour (13:00:00) + // - Set EpochStartHeight to current block height + expectedGenesis.HourEpoch.EpochNumber = uint64(blockTime.Hour()) + expectedGenesis.HourEpoch.EpochStartTime = 
blockTime.Truncate(time.Hour) + expectedGenesis.HourEpoch.EpochStartHeight = ctx.BlockHeight() + } + + // Check that the exported state matches the imported state + exportedState := s.chainA.GetSimApp().RateLimitKeeper.ExportGenesis(s.chainA.GetContext()) + s.Require().Equal(expectedGenesis, *exportedState, "exported genesis state") + }) + } +} diff --git a/modules/apps/rate-limiting/keeper/grpc_query.go b/modules/apps/rate-limiting/keeper/grpc_query.go new file mode 100644 index 00000000000..075fc7f5f73 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/grpc_query.go @@ -0,0 +1,128 @@ +package keeper + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + "github.com/cosmos/ibc-go/v10/modules/core/exported" + tmclient "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" +) + +var _ types.QueryServer = Keeper{} + +// Query all rate limits +func (k Keeper) AllRateLimits(c context.Context, req *types.QueryAllRateLimitsRequest) (*types.QueryAllRateLimitsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := k.GetAllRateLimits(ctx) + return &types.QueryAllRateLimitsResponse{RateLimits: rateLimits}, nil +} + +// Query a rate limit by denom and channelId +func (k Keeper) RateLimit(c context.Context, req *types.QueryRateLimitRequest) (*types.QueryRateLimitResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimit, found := k.GetRateLimit(ctx, req.Denom, req.ChannelOrClientId) + if !found { + return &types.QueryRateLimitResponse{}, nil + } + return &types.QueryRateLimitResponse{RateLimit: 
&rateLimit}, nil +} + +// Query all rate limits for a given chain +func (k Keeper) RateLimitsByChainID(c context.Context, req *types.QueryRateLimitsByChainIDRequest) (*types.QueryRateLimitsByChainIDResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := make([]types.RateLimit, 0) + for _, rateLimit := range k.GetAllRateLimits(ctx) { + + // Determine the client state from the channel Id + _, clientState, err := k.channelKeeper.GetChannelClientState(ctx, transfertypes.PortID, rateLimit.Path.ChannelOrClientId) + if err != nil { + var ok bool + clientState, ok = k.clientKeeper.GetClientState(ctx, rateLimit.Path.ChannelOrClientId) + if !ok { + return &types.QueryRateLimitsByChainIDResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Unable to fetch client state from channel or client Id %s", rateLimit.Path.ChannelOrClientId) + } + } + + // Check if the client state is a tendermint client + if clientState.ClientType() != exported.Tendermint { + continue + } + + // Type assert to tendermint client state + tmClientState, ok := clientState.(*tmclient.ClientState) + if !ok { + // This should never happen if ClientType() == Tendermint, but check anyway + continue + } + + // If the chain ID matches, add the rate limit to the returned list + if tmClientState.GetChainID() == req.ChainId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChainIDResponse{RateLimits: rateLimits}, nil +} + +// Query all rate limits for a given channel +func (k Keeper) RateLimitsByChannelOrClientID(c context.Context, req *types.QueryRateLimitsByChannelOrClientIDRequest) (*types.QueryRateLimitsByChannelOrClientIDResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := make([]types.RateLimit, 0) + for _, rateLimit := range k.GetAllRateLimits(ctx) { + 
if rateLimit.Path.ChannelOrClientId == req.ChannelOrClientId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChannelOrClientIDResponse{RateLimits: rateLimits}, nil +} + +// Query all blacklisted denoms +func (k Keeper) AllBlacklistedDenoms(c context.Context, req *types.QueryAllBlacklistedDenomsRequest) (*types.QueryAllBlacklistedDenomsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + blacklistedDenoms := k.GetAllBlacklistedDenoms(ctx) + return &types.QueryAllBlacklistedDenomsResponse{Denoms: blacklistedDenoms}, nil +} + +// Query all whitelisted addresses +func (k Keeper) AllWhitelistedAddresses(c context.Context, req *types.QueryAllWhitelistedAddressesRequest) (*types.QueryAllWhitelistedAddressesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + whitelistedAddresses := k.GetAllWhitelistedAddressPairs(ctx) + return &types.QueryAllWhitelistedAddressesResponse{AddressPairs: whitelistedAddresses}, nil +} diff --git a/modules/apps/rate-limiting/keeper/grpc_query_test.go b/modules/apps/rate-limiting/keeper/grpc_query_test.go new file mode 100644 index 00000000000..bb6c92c88eb --- /dev/null +++ b/modules/apps/rate-limiting/keeper/grpc_query_test.go @@ -0,0 +1,120 @@ +package keeper_test + +import ( + "fmt" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + ibctmtypes "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" +) + +// Add three rate limits on 
different channels +// Each should have a different chainId +func (s *KeeperTestSuite) setupQueryRateLimitTests() []types.RateLimit { + s.T().Helper() + + rateLimits := []types.RateLimit{} + for i := int64(0); i <= 2; i++ { + clientID := fmt.Sprintf("07-tendermint-%d", i) + chainID := fmt.Sprintf("chain-%d", i) + connectionID := fmt.Sprintf("connection-%d", i) + channelID := fmt.Sprintf("channel-%d", i) + + // First register the client, connection, and channel (so we can map back to chainId) + // Nothing in the client state matters besides the chainId + clientState := ibctmtypes.NewClientState(chainID, ibctmtypes.Fraction{}, time.Duration(0), time.Duration(0), time.Duration(0), clienttypes.Height{}, nil, nil) + connection := connectiontypes.ConnectionEnd{ClientId: clientID} + channel := channeltypes.Channel{ConnectionHops: []string{connectionID}} + + s.chainA.GetSimApp().IBCKeeper.ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) + s.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.SetConnection(s.chainA.GetContext(), connectionID, connection) + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), transfertypes.PortID, channelID, channel) + + // Then add the rate limit + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom", ChannelOrClientId: channelID}, + } + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit) + rateLimits = append(rateLimits, rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestQueryAllRateLimits() { + expectedRateLimits := s.setupQueryRateLimitTests() + queryResponse, err := s.chainA.GetSimApp().RateLimitKeeper.AllRateLimits(s.chainA.GetContext(), &types.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().ElementsMatch(expectedRateLimits, queryResponse.RateLimits) +} + +func (s *KeeperTestSuite) TestQueryRateLimit() { + allRateLimits := s.setupQueryRateLimitTests() + for _, expectedRateLimit := range allRateLimits { + 
queryResponse, err := s.chainA.GetSimApp().RateLimitKeeper.RateLimit(s.chainA.GetContext(), &types.QueryRateLimitRequest{ + Denom: expectedRateLimit.Path.Denom, + ChannelOrClientId: expectedRateLimit.Path.ChannelOrClientId, + }) + s.Require().NoError(err, "no error expected when querying rate limit on channel: %s", expectedRateLimit.Path.ChannelOrClientId) + s.Require().Equal(expectedRateLimit, *queryResponse.RateLimit) + } +} + +func (s *KeeperTestSuite) TestQueryRateLimitsByChainId() { + allRateLimits := s.setupQueryRateLimitTests() + for i, expectedRateLimit := range allRateLimits { + chainID := fmt.Sprintf("chain-%d", i) + queryResponse, err := s.chainA.GetSimApp().RateLimitKeeper.RateLimitsByChainID(s.chainA.GetContext(), &types.QueryRateLimitsByChainIDRequest{ + ChainId: chainID, + }) + s.Require().NoError(err, "no error expected when querying rate limit on chain: %s", chainID) + s.Require().Len(queryResponse.RateLimits, 1) + s.Require().Equal(expectedRateLimit, queryResponse.RateLimits[0]) + } +} + +func (s *KeeperTestSuite) TestQueryRateLimitsByChannelOrClientId() { + allRateLimits := s.setupQueryRateLimitTests() + for i, expectedRateLimit := range allRateLimits { + channelID := fmt.Sprintf("channel-%d", i) + queryResponse, err := s.chainA.GetSimApp().RateLimitKeeper.RateLimitsByChannelOrClientID(s.chainA.GetContext(), &types.QueryRateLimitsByChannelOrClientIDRequest{ + ChannelOrClientId: channelID, + }) + s.Require().NoError(err, "no error expected when querying rate limit on channel: %s", channelID) + s.Require().Len(queryResponse.RateLimits, 1) + s.Require().Equal(expectedRateLimit, queryResponse.RateLimits[0]) + } +} + +func (s *KeeperTestSuite) TestQueryAllBlacklistedDenoms() { + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), "denom-A") + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), "denom-B") + + queryResponse, err := 
s.chainA.GetSimApp().RateLimitKeeper.AllBlacklistedDenoms(sdk.WrapSDKContext(s.chainA.GetContext()), &types.QueryAllBlacklistedDenomsRequest{}) // Wrap context + s.Require().NoError(err, "no error expected when querying blacklisted denoms") + s.Require().Equal([]string{"denom-A", "denom-B"}, queryResponse.Denoms) +} + +func (s *KeeperTestSuite) TestQueryAllWhitelistedAddresses() { + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), types.WhitelistedAddressPair{ + Sender: "address-A", + Receiver: "address-B", + }) + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), types.WhitelistedAddressPair{ + Sender: "address-C", + Receiver: "address-D", + }) + queryResponse, err := s.chainA.GetSimApp().RateLimitKeeper.AllWhitelistedAddresses(sdk.WrapSDKContext(s.chainA.GetContext()), &types.QueryAllWhitelistedAddressesRequest{}) // Wrap context + s.Require().NoError(err, "no error expected when querying whitelisted addresses") + + expectedWhitelist := []types.WhitelistedAddressPair{ + {Sender: "address-A", Receiver: "address-B"}, + {Sender: "address-C", Receiver: "address-D"}, + } + s.Require().Equal(expectedWhitelist, queryResponse.AddressPairs) +} diff --git a/modules/apps/rate-limiting/keeper/keeper.go b/modules/apps/rate-limiting/keeper/keeper.go new file mode 100644 index 00000000000..e0040328b2b --- /dev/null +++ b/modules/apps/rate-limiting/keeper/keeper.go @@ -0,0 +1,68 @@ +package keeper + +import ( + "errors" + "fmt" + "strings" + + corestore "cosmossdk.io/core/store" + "cosmossdk.io/log" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" +) + +// Keeper maintains the link to storage and exposes getter/setter methods for the various parts of the state machine +type Keeper struct { + storeService corestore.KVStoreService + cdc 
codec.BinaryCodec + + ics4Wrapper porttypes.ICS4Wrapper + channelKeeper types.ChannelKeeper + clientKeeper types.ClientKeeper + + bankKeeper types.BankKeeper + authority string +} + +// NewKeeper creates a new rate-limiting Keeper instance +func NewKeeper(cdc codec.BinaryCodec, storeService corestore.KVStoreService, ics4Wrapper porttypes.ICS4Wrapper, channelKeeper types.ChannelKeeper, clientKeeper types.ClientKeeper, bankKeeper types.BankKeeper, authority string) Keeper { + if strings.TrimSpace(authority) == "" { + panic(errors.New("authority must be non-empty")) + } + + return Keeper{ + cdc: cdc, + storeService: storeService, + ics4Wrapper: ics4Wrapper, + channelKeeper: channelKeeper, + clientKeeper: clientKeeper, + bankKeeper: bankKeeper, + authority: authority, + } +} + +// SetICS4Wrapper sets the ICS4Wrapper. +// It is used after the middleware is created since the keeper needs the underlying module's SendPacket capability, +// creating a dependency cycle. +func (k *Keeper) SetICS4Wrapper(ics4Wrapper porttypes.ICS4Wrapper) { + k.ics4Wrapper = ics4Wrapper +} + +// ICS4Wrapper returns the ICS4Wrapper to send packets downstream. +func (k *Keeper) ICS4Wrapper() porttypes.ICS4Wrapper { + return k.ics4Wrapper +} + +// GetAuthority returns the module's authority. +func (k Keeper) GetAuthority() string { + return k.authority +} + +// Logger returns a module-specific logger. 
+func (Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} diff --git a/modules/apps/rate-limiting/keeper/keeper_test.go b/modules/apps/rate-limiting/keeper/keeper_test.go new file mode 100644 index 00000000000..024c0696c86 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/keeper_test.go @@ -0,0 +1,86 @@ +package keeper_test + +import ( + "testing" + + testifysuite "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/runtime" + + keeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +type KeeperTestSuite struct { + testifysuite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + chainC *ibctesting.TestChain +} + +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) +} + +func TestKeeperTestSuite(t *testing.T) { + testifysuite.Run(t, new(KeeperTestSuite)) +} + +func (s *KeeperTestSuite) TestNewKeeper() { + testCases := []struct { + name string + instantiateFn func() + panicMsg string + }{ + { + name: "success", + instantiateFn: func() { + keeper.NewKeeper( + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ratelimittypes.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, // This is now used as ics4Wrapper + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().IBCKeeper.ClientKeeper, // Add clientKeeper + s.chainA.GetSimApp().BankKeeper, + s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), + ) + }, + panicMsg: "", + }, + { + name: "failure: empty authority", + instantiateFn: 
func() { + keeper.NewKeeper( + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ratelimittypes.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, // ics4Wrapper + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().IBCKeeper.ClientKeeper, // clientKeeper + s.chainA.GetSimApp().BankKeeper, + "", // empty authority + ) + }, + panicMsg: "authority must be non-empty", + }, + } + + for _, tc := range testCases { + tc := tc + s.SetupTest() + + s.Run(tc.name, func() { + if tc.panicMsg == "" { + s.Require().NotPanics(tc.instantiateFn) + } else { + s.Require().PanicsWithError(tc.panicMsg, tc.instantiateFn) + } + }) + } +} diff --git a/modules/apps/rate-limiting/keeper/msg_server.go b/modules/apps/rate-limiting/keeper/msg_server.go new file mode 100644 index 00000000000..81d534963b2 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/msg_server.go @@ -0,0 +1,81 @@ +package keeper + +import ( + "context" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +type msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the ratelimit MsgServer interface +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} + +// Adds a new rate limit. 
Fails if the rate limit already exists or the channel value is 0 +func (k msgServer) AddRateLimit(goCtx context.Context, msg *types.MsgAddRateLimit) (*types.MsgAddRateLimitResponse, error) { + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.Keeper.AddRateLimit(ctx, msg); err != nil { + return nil, err + } + + return &types.MsgAddRateLimitResponse{}, nil +} + +// Updates an existing rate limit. Fails if the rate limit doesn't exist +func (k msgServer) UpdateRateLimit(goCtx context.Context, msg *types.MsgUpdateRateLimit) (*types.MsgUpdateRateLimitResponse, error) { + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.Keeper.UpdateRateLimit(ctx, msg); err != nil { + return nil, err + } + + return &types.MsgUpdateRateLimitResponse{}, nil +} + +// Removes a rate limit. Fails if the rate limit doesn't exist +func (k msgServer) RemoveRateLimit(goCtx context.Context, msg *types.MsgRemoveRateLimit) (*types.MsgRemoveRateLimitResponse, error) { + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + _, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId) + if !found { + return nil, types.ErrRateLimitNotFound + } + + k.Keeper.RemoveRateLimit(ctx, msg.Denom, msg.ChannelOrClientId) + return &types.MsgRemoveRateLimitResponse{}, nil +} + +// Resets the flow on a rate limit. 
Fails if the rate limit doesn't exist +func (k msgServer) ResetRateLimit(goCtx context.Context, msg *types.MsgResetRateLimit) (*types.MsgResetRateLimitResponse, error) { + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.Keeper.ResetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId); err != nil { + return nil, err + } + + return &types.MsgResetRateLimitResponse{}, nil +} diff --git a/modules/apps/rate-limiting/keeper/msg_server_test.go b/modules/apps/rate-limiting/keeper/msg_server_test.go new file mode 100644 index 00000000000..0a74fa6827b --- /dev/null +++ b/modules/apps/rate-limiting/keeper/msg_server_test.go @@ -0,0 +1,225 @@ +package keeper_test + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" +) + +var ( + authority = authtypes.NewModuleAddress(govtypes.ModuleName).String() + + addRateLimitMsg = types.MsgAddRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(20), + DurationHours: 30, + } + + updateRateLimitMsg = types.MsgUpdateRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + MaxPercentRecv: sdkmath.NewInt(20), + MaxPercentSend: sdkmath.NewInt(30), + DurationHours: 40, + } + + removeRateLimitMsg = types.MsgRemoveRateLimit{ + Signer: authority, + 
Denom: "denom", + ChannelOrClientId: "channel-0", + } + + resetRateLimitMsg = types.MsgResetRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + } +) + +// Helper function to create a channel and prevent a channel not exists error +func (s *KeeperTestSuite) createChannel(channelID string) { + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), transfertypes.PortID, channelID, channeltypes.Channel{}) +} + +// Helper function to mint tokens and create channel value to prevent a zero channel value error +func (s *KeeperTestSuite) createChannelValue(_ string, channelValue sdkmath.Int) { + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(addRateLimitMsg.Denom, channelValue))) + s.Require().NoError(err) +} + +// Helper function to add a rate limit with an optional error expectation +func (s *KeeperTestSuite) addRateLimit(msgAddRateLimit types.MsgAddRateLimit, expectedErr *errorsmod.Error) { + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + _, actualErr := msgServer.AddRateLimit(s.chainA.GetContext(), &msgAddRateLimit) + + // If it should have been added successfully, confirm no error + // and confirm the rate limit was created + if expectedErr == nil { + s.Require().NoError(actualErr) + + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), addRateLimitMsg.Denom, addRateLimitMsg.ChannelOrClientId) + s.Require().True(found) + } else { + // If it should have failed, check the error + s.Require().ErrorIs(actualErr, expectedErr) + } +} + +// Helper function to add a rate limit successfully +func (s *KeeperTestSuite) addRateLimitSuccessful(msgAddRateLimit types.MsgAddRateLimit) { + s.addRateLimit(msgAddRateLimit, nil) +} + +// Helper function to add a rate limit with an expected error +func (s *KeeperTestSuite) addRateLimitWithError(msgAddRateLimit types.MsgAddRateLimit, expectedErr 
*errorsmod.Error) { + s.addRateLimit(msgAddRateLimit, expectedErr) +} + +func (s *KeeperTestSuite) TestMsgServer_AddRateLimit() { + denom := addRateLimitMsg.Denom + channelID := addRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + // First try to add a rate limit when there's no channel value, it will fail + s.addRateLimitWithError(addRateLimitMsg, types.ErrZeroChannelValue) + + // Create channel value + s.createChannelValue(denom, channelValue) + + // Then try to add a rate limit before the channel has been created, it will also fail + s.addRateLimitWithError(addRateLimitMsg, types.ErrChannelNotFound) + + // Create the channel + s.createChannel(channelID) + + // Now add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Finally, try to add the same rate limit again - it should fail + s.addRateLimitWithError(addRateLimitMsg, types.ErrRateLimitAlreadyExists) + + // Verify that signer == authority required + invalidSignerMsg := addRateLimitMsg + invalidSignerMsg.Signer = "" + s.addRateLimitWithError(invalidSignerMsg, govtypes.ErrInvalidSigner) +} + +func (s *KeeperTestSuite) TestMsgServer_UpdateRateLimit() { + denom := updateRateLimitMsg.Denom + channelID := updateRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + + // Create channel and channel value + s.createChannel(channelID) + s.createChannelValue(denom, channelValue) + + // Attempt to update a rate limit that does not exist + _, err := msgServer.UpdateRateLimit(s.chainA.GetContext(), &updateRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Update the rate limit successfully + _, err = msgServer.UpdateRateLimit(s.chainA.GetContext(), &updateRateLimitMsg) + s.Require().NoError(err) + + // Check ratelimit quota is updated correctly + updatedRateLimit, found := 
s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(updatedRateLimit.Quota, &types.Quota{ + MaxPercentSend: updateRateLimitMsg.MaxPercentSend, + MaxPercentRecv: updateRateLimitMsg.MaxPercentRecv, + DurationHours: updateRateLimitMsg.DurationHours, + }) + + // Attempt to update a rate limit that has invalid authority + invalidSignerMsg := updateRateLimitMsg + invalidSignerMsg.Signer = "" + _, err = msgServer.UpdateRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, govtypes.ErrInvalidSigner) +} + +func (s *KeeperTestSuite) TestMsgServer_RemoveRateLimit() { + denom := removeRateLimitMsg.Denom + channelID := removeRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + + s.createChannel(channelID) + s.createChannelValue(denom, channelValue) + + // Attempt to remove a rate limit that does not exist + _, err := msgServer.RemoveRateLimit(s.chainA.GetContext(), &removeRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Remove the rate limit successfully + _, err = msgServer.RemoveRateLimit(s.chainA.GetContext(), &removeRateLimitMsg) + s.Require().NoError(err) + + // Confirm it was removed + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().False(found) + + // Attempt to Remove a rate limit that has invalid authority + invalidSignerMsg := removeRateLimitMsg + invalidSignerMsg.Signer = "" + _, err = msgServer.RemoveRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, govtypes.ErrInvalidSigner) +} + +func (s *KeeperTestSuite) TestMsgServer_ResetRateLimit() { + denom := resetRateLimitMsg.Denom + channelID := resetRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + 
+ msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + + s.createChannel(channelID) + s.createChannelValue(denom, channelValue) + + // Attempt to reset a rate limit that does not exist + _, err := msgServer.ResetRateLimit(s.chainA.GetContext(), &resetRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Reset the rate limit successfully + _, err = msgServer.ResetRateLimit(s.chainA.GetContext(), &resetRateLimitMsg) + s.Require().NoError(err) + + // Check ratelimit quota is flow correctly + resetRateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(resetRateLimit.Flow, &types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + }) + + // Attempt to Remove a rate limit that has invalid authority + invalidSignerMsg := resetRateLimitMsg + invalidSignerMsg.Signer = "" + _, err = msgServer.ResetRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, govtypes.ErrInvalidSigner) +} diff --git a/modules/apps/rate-limiting/keeper/packet.go b/modules/apps/rate-limiting/keeper/packet.go new file mode 100644 index 00000000000..4d52a044b12 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/packet.go @@ -0,0 +1,269 @@ +package keeper + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + channeltypesv2 
"github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" +) + +type RateLimitedPacketInfo struct { + ChannelID string + Denom string + Amount sdkmath.Int + Sender string + Receiver string +} + +// CheckAcknowledementSucceeded unmarshals IBC Acknowledgements, and determines +// whether the tx was successful +func (k Keeper) CheckAcknowledementSucceeded(ctx sdk.Context, ack []byte) (success bool, err error) { + // Check if the ack is the IBC v2 universal error acknowledgement + if bytes.Equal(ack, channeltypesv2.ErrorAcknowledgement[:]) { + return false, nil + } + + // Unmarshal the raw ack response + var acknowledgement channeltypes.Acknowledgement + if err := transfertypes.ModuleCdc.UnmarshalJSON(ack, &acknowledgement); err != nil { + return false, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %s", err.Error()) + } + + // The ack can come back as either AcknowledgementResult or AcknowledgementError + // If it comes back as AcknowledgementResult, the messages are encoded differently depending on the SDK version + switch response := acknowledgement.Response.(type) { + case *channeltypes.Acknowledgement_Result: + if len(response.Result) == 0 { + return false, errorsmod.Wrapf(channeltypes.ErrInvalidAcknowledgement, "acknowledgement result cannot be empty") + } + return true, nil + + case *channeltypes.Acknowledgement_Error: + k.Logger(ctx).Error(fmt.Sprintf("acknowledgement error: %s", response.Error)) + return false, nil + + default: + return false, errorsmod.Wrapf(channeltypes.ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", response) + } +} + +// ParseDenomFromSendPacket parses the denom from the Send Packet. +// The denom that the rate limiter will use for a SEND packet depends on whether +// it was a NATIVE token (e.g. ustrd, stuatom, etc.) or NON-NATIVE token (e.g. ibc/...)... 
+// +// We can identify if the token is native or not by parsing the trace denom from the packet +// If the token is NATIVE, it will not have a prefix (e.g. ustrd), +// and if it is NON-NATIVE, it will have a prefix (e.g. transfer/channel-2/uosmo) +// +// For NATIVE denoms, return as is (e.g. ustrd) +// For NON-NATIVE denoms, take the ibc hash (e.g. hash "transfer/channel-2/usoms" into "ibc/...") +func ParseDenomFromSendPacket(packet transfertypes.FungibleTokenPacketData) string { + // Check if the denom is already an IBC denom (starts with "ibc/") + if strings.HasPrefix(packet.Denom, "ibc/") { + return packet.Denom + } + + // Determine the denom by looking at the denom trace path + denom := transfertypes.ExtractDenomFromPath(packet.Denom) + return denom.IBCDenom() +} + +// ParseDenomFromRecvPacket parses the denom from the Recv Packet that will be used by the rate limit module. +// The denom that the rate limiter will use for a RECEIVE packet depends on whether it was a source or sink. +// +// Sink: The token moves forward, to a chain different than its previous hop +// The new port and channel are APPENDED to the denom trace. +// (e.g. A -> B, B is a sink) (e.g. A -> B -> C, C is a sink) +// +// Source: The token moves backwards (i.e. revisits the last chain it was sent from) +// The port and channel are REMOVED from the denom trace - undoing the last hop. +// (e.g. A -> B -> A, A is a source) (e.g. A -> B -> C -> B, B is a source) +// +// If the chain is acting as a SINK: We add on the port and channel and hash it +// Ex1: uosmo sent from Osmosis to Stride +// Packet Denom: uosmo +// -> Add Prefix: transfer/channel-X/uosmo +// -> Hash: ibc/... +// +// Ex2: ujuno sent from Osmosis to Stride +// PacketDenom: transfer/channel-Y/ujuno (channel-Y is the Juno <> Osmosis channel) +// -> Add Prefix: transfer/channel-X/transfer/channel-Y/ujuno +// -> Hash: ibc/... +// +// If the chain is acting as a SOURCE: First, remove the prefix. 
Then if there is still a denom trace, hash it +// Ex1: ustrd sent back to Stride from Osmosis +// Packet Denom: transfer/channel-X/ustrd +// -> Remove Prefix: ustrd +// -> Leave as is: ustrd +// +// Ex2: juno was sent to Stride, then to Osmosis, then back to Stride +// Packet Denom: transfer/channel-X/transfer/channel-Z/ujuno +// -> Remove Prefix: transfer/channel-Z/ujuno +// -> Hash: ibc/... +func ParseDenomFromRecvPacket(packet channeltypes.Packet, packetData transfertypes.FungibleTokenPacketData) string { + sourcePort := packet.SourcePort + sourceChannel := packet.SourceChannel + + // To determine the denom, first check whether Stride is acting as source + // Build the source prefix and check if the denom starts with it + hop := transfertypes.NewHop(sourcePort, sourceChannel) + sourcePrefix := hop.String() + "/" + + if strings.HasPrefix(packetData.Denom, sourcePrefix) { + // Remove the source prefix (e.g. transfer/channel-X/transfer/channel-Z/ujuno -> transfer/channel-Z/ujuno) + unprefixedDenom := packetData.Denom[len(sourcePrefix):] + + // Native assets will have an empty trace path and can be returned as is + denom := transfertypes.ExtractDenomFromPath(unprefixedDenom) + return denom.IBCDenom() + } + // Prefix the destination channel - this will contain the trailing slash (e.g. 
transfer/channel-X/) + destinationPrefix := transfertypes.NewHop(packet.GetDestPort(), packet.GetDestChannel()) + prefixedDenom := destinationPrefix.String() + "/" + packetData.Denom + + // Hash the denom trace + denom := transfertypes.ExtractDenomFromPath(prefixedDenom) + return denom.IBCDenom() +} + +// ParsePacketInfo parses the sender and channelId and denom for the corresponding RateLimit object, and +// the sender/receiver/transfer amount +// +// The channelID should always be used as the key for the RateLimit object (not the counterparty channelID) +// For a SEND packet, the channelID is the SOURCE channel +// For a RECEIVE packet, the channelID is the DESTINATION channel +// +// The Source and Destination are defined from the perspective of a packet recipient. +func ParsePacketInfo(packet channeltypes.Packet, direction types.PacketDirection) (RateLimitedPacketInfo, error) { + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &packetData); err != nil { + return RateLimitedPacketInfo{}, err + } + + var channelID, denom string + if direction == types.PACKET_SEND { + channelID = packet.GetSourceChannel() + denom = ParseDenomFromSendPacket(packetData) + } else { + channelID = packet.GetDestChannel() + denom = ParseDenomFromRecvPacket(packet, packetData) + } + + amount, ok := sdkmath.NewIntFromString(packetData.Amount) + if !ok { + return RateLimitedPacketInfo{}, + errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "Unable to cast packet amount '%s' to sdkmath.Int", packetData.Amount) + } + + packetInfo := RateLimitedPacketInfo{ + ChannelID: channelID, + Denom: denom, + Amount: amount, + Sender: packetData.Sender, + Receiver: packetData.Receiver, + } + + return packetInfo, nil +} + +// Middleware implementation for SendPacket with rate limiting +// Checks whether the rate limit has been exceeded - and if it hasn't, sends the packet +func (k Keeper) SendRateLimitedPacket(ctx sdk.Context, sourcePort, sourceChannel string, 
timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) error { + seq, found := k.channelKeeper.GetNextSequenceSend(ctx, sourcePort, sourceChannel) + if !found { + return errorsmod.Wrapf(channeltypes.ErrSequenceSendNotFound, "source port: %s, source channel: %s", sourcePort, sourceChannel) + } + + packet := channeltypes.Packet{ + Sequence: seq, + SourcePort: sourcePort, + SourceChannel: sourceChannel, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + Data: data, + } + + packetInfo, err := ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + // Check if the packet would exceed the outflow rate limit + updatedFlow, err := k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_SEND, packetInfo) + if err != nil { + return err + } + + // Store the sequence number of the packet so that if the transfer fails, + // we can identify if it was sent during this quota and can revert the outflow + if updatedFlow { + k.SetPendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + } + + return nil +} + +// Middleware implementation for RecvPacket with rate limiting +// Checks whether the rate limit has been exceeded - and if it hasn't, allows the packet +func (k Keeper) ReceiveRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := ParsePacketInfo(packet, types.PACKET_RECV) + if err != nil { + // If the packet data is unparseable, we can't apply rate limiting. + // Log the error and allow the packet to proceed to the underlying app + // which is responsible for handling invalid packet data. 
+ k.Logger(ctx).Error("Unable to parse packet data for rate limiting", "error", err) + return nil // Returning nil allows the packet to continue down the stack + } + + // If parsing was successful, check the rate limit + _, err = k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_RECV, packetInfo) + // If CheckRateLimitAndUpdateFlow returns an error (e.g., quota exceeded), return it to generate an error ack. + return err +} + +// AcknowledgeRateLimitedPacket implements for OnAckPacket for porttypes.Middleware. +// If the packet failed, we should decrement the Outflow. +func (k Keeper) AcknowledgeRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte) error { + ackSuccess, err := k.CheckAcknowledementSucceeded(ctx, acknowledgement) + if err != nil { + return err + } + + // Parse the denom, channelId, and amount from the packet + packetInfo, err := ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + // If the ack was successful, remove the pending packet + if ackSuccess { + k.RemovePendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + return nil + } + + // If the ack failed, undo the change to the rate limit Outflow + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) +} + +// Middleware implementation for OnAckPacket with rate limiting +// The Outflow should be decremented from the failed packet +func (k Keeper) TimeoutRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) +} diff --git a/modules/apps/rate-limiting/keeper/packet_test.go b/modules/apps/rate-limiting/keeper/packet_test.go new file mode 100644 index 00000000000..872805d8f3c --- /dev/null +++ b/modules/apps/rate-limiting/keeper/packet_test.go @@ -0,0 +1,795 @@ 
+package keeper_test + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + tmbytes "github.com/cometbft/cometbft/libs/bytes" + + packerforwardkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +const ( + transferPort = "transfer" + uosmo = "uosmo" + ujuno = "ujuno" + ustrd = "ustrd" + stuatom = "stuatom" + channelOnStride = "channel-0" + channelOnHost = "channel-1" +) + +func hashDenomTrace(denomTrace string) string { + trace32byte := sha256.Sum256([]byte(denomTrace)) + var traceTmByte tmbytes.HexBytes = trace32byte[:] + return fmt.Sprintf("ibc/%s", traceTmByte) +} + +func TestParseDenomFromSendPacket(t *testing.T) { + testCases := []struct { + name string + packetDenomTrace string + expectedDenom string + }{ + // Native assets stay as is + { + name: "ustrd", + packetDenomTrace: ustrd, + expectedDenom: ustrd, + }, + { + name: "stuatom", + packetDenomTrace: stuatom, + expectedDenom: stuatom, + }, + // Non-native assets are hashed + { + name: "uosmo_one_hop", + packetDenomTrace: "transfer/channel-0/usomo", + expectedDenom: hashDenomTrace("transfer/channel-0/usomo"), + }, + { + name: "uosmo_two_hops", + packetDenomTrace: "transfer/channel-2/transfer/channel-1/usomo", + 
expectedDenom: hashDenomTrace("transfer/channel-2/transfer/channel-1/usomo"), + }, + // IBC denoms are passed through as is + { + name: "ibc_denom", + packetDenomTrace: "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + expectedDenom: "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + packet := transfertypes.FungibleTokenPacketData{ + Denom: tc.packetDenomTrace, + } + + parsedDenom := keeper.ParseDenomFromSendPacket(packet) + require.Equal(t, tc.expectedDenom, parsedDenom, tc.name) + }) + } +} + +func TestParseDenomFromRecvPacket(t *testing.T) { + osmoChannelOnStride := "channel-0" + strideChannelOnOsmo := "channel-100" + junoChannelOnOsmo := "channel-200" + junoChannelOnStride := "channel-300" + + testCases := []struct { + name string + packetDenomTrace string + sourceChannel string + destinationChannel string + expectedDenom string + }{ + // Sink asset one hop away: + // uosmo sent from Osmosis to Stride (uosmo) + // -> tack on prefix (transfer/channel-0/uosmo) and hash + { + name: "sink_one_hop", + packetDenomTrace: uosmo, + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, osmoChannelOnStride, uosmo)), + }, + // Sink asset two hops away: + // ujuno sent from Juno to Osmosis to Stride (transfer/channel-200/ujuno) + // -> tack on prefix (transfer/channel-0/transfer/channel-200/ujuno) and hash + { + name: "sink_two_hops", + packetDenomTrace: fmt.Sprintf("%s/%s/%s", transferPort, junoChannelOnOsmo, ujuno), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s/%s/%s", transferPort, osmoChannelOnStride, transferPort, junoChannelOnOsmo, ujuno)), + }, + // Native source assets + // ustrd sent from Stride to Osmosis and then back to Stride (transfer/channel-0/ustrd) + 
// -> remove prefix and leave as is (ustrd) + { + name: "native_source", + packetDenomTrace: fmt.Sprintf("%s/%s/%s", transferPort, strideChannelOnOsmo, ustrd), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: ustrd, + }, + // Non-native source assets + // ujuno was sent from Juno to Stride, then to Osmosis, then back to Stride (transfer/channel-0/transfer/channel-300/ujuno) + // -> remove prefix (transfer/channel-300/ujuno) and hash + { + name: "non_native_source", + packetDenomTrace: fmt.Sprintf("%s/%s/%s/%s/%s", transferPort, strideChannelOnOsmo, transferPort, junoChannelOnStride, ujuno), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, junoChannelOnStride, ujuno)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + packet := channeltypes.Packet{ + SourcePort: transferPort, + DestinationPort: transferPort, + SourceChannel: tc.sourceChannel, + DestinationChannel: tc.destinationChannel, + } + packetData := transfertypes.FungibleTokenPacketData{ + Denom: tc.packetDenomTrace, + } + + parsedDenom := keeper.ParseDenomFromRecvPacket(packet, packetData) + require.Equal(t, tc.expectedDenom, parsedDenom, tc.name) + }) + } +} + +func (s *KeeperTestSuite) TestParsePacketInfo() { + sourceChannel := "channel-100" + destinationChannel := "channel-200" + denom := "denom" + amountString := "100" + amountInt := sdkmath.NewInt(100) + sender := "sender" + receiver := "receiver" + + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{ + Denom: denom, + Amount: amountString, + Sender: sender, + Receiver: receiver, + }) + s.Require().NoError(err) + + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + } + + // Send 'denom' from channel-100 (stride) -> 
channel-200 + // Since the 'denom' is native, it's kept as is for the rate limit object + expectedSendPacketInfo := keeper.RateLimitedPacketInfo{ + ChannelID: sourceChannel, + Denom: denom, + Amount: amountInt, + Sender: sender, + Receiver: receiver, + } + actualSendPacketInfo, err := keeper.ParsePacketInfo(packet, types.PACKET_SEND) + s.Require().NoError(err, "no error expected when parsing send packet") + s.Require().Equal(expectedSendPacketInfo, actualSendPacketInfo, "send packet") + + // Receive 'denom' from channel-100 -> channel-200 (stride) + // The stride channel (channel-200) should be tacked onto the end and the denom should be hashed + expectedRecvPacketInfo := keeper.RateLimitedPacketInfo{ + ChannelID: destinationChannel, + Denom: hashDenomTrace(fmt.Sprintf("transfer/%s/%s", destinationChannel, denom)), + Amount: amountInt, + Sender: sender, + Receiver: receiver, + } + actualRecvPacketInfo, err := keeper.ParsePacketInfo(packet, types.PACKET_RECV) + s.Require().NoError(err, "no error expected when parsing recv packet") + s.Require().Equal(expectedRecvPacketInfo, actualRecvPacketInfo, "recv packet") +} + +func (s *KeeperTestSuite) TestCheckAcknowledgementSucceeded() { + testCases := []struct { + name string + ack []byte + wantSuccess bool + wantErr error + }{ + { + name: "success legacy format", + ack: func() []byte { + return transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Result{Result: []byte{1}}, + }) + }(), + wantSuccess: true, + wantErr: nil, + }, + { + name: "failed legacy format - empty result", + ack: func() []byte { + return transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Result{}, + }) + }(), + wantSuccess: false, + wantErr: channeltypes.ErrInvalidAcknowledgement, + }, + { + name: "failed legacy format", + ack: func() []byte { + return transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: 
&channeltypes.Acknowledgement_Error{Error: "some error"}, + }) + }(), + wantSuccess: false, + wantErr: nil, + }, + { + name: "failed v2 format", + ack: channeltypesv2.ErrorAcknowledgement[:], + wantSuccess: false, + wantErr: nil, + }, + { + name: "invalid format", + ack: []byte("invalid ack"), + wantSuccess: false, + wantErr: sdkerrors.ErrUnknownRequest, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + success, err := s.chainA.GetSimApp().RateLimitKeeper.CheckAcknowledementSucceeded(s.chainA.GetContext(), tc.ack) + + if tc.wantErr != nil { + s.Require().ErrorIs(err, tc.wantErr, tc.name) + } else { + s.Require().NoError(err, "unexpected error for %s", tc.name) + } + + s.Require().Equal(tc.wantSuccess, success, + "expected success=%v for %s", tc.wantSuccess, tc.name) + }) + } +} + +func (s *KeeperTestSuite) createRateLimitCloseToQuota(denom string, channelID string, direction types.PacketDirection) { + channelValue := sdkmath.NewInt(100) + threshold := sdkmath.NewInt(10) + + // Set inflow/outflow close to threshold, depending on which direction we're going in + inflow := sdkmath.ZeroInt() + outflow := sdkmath.ZeroInt() + if direction == types.PACKET_RECV { + inflow = sdkmath.NewInt(9) + } else { + outflow = sdkmath.NewInt(9) + } + + // Store rate limit + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelOrClientId: channelID, + }, + Quota: &types.Quota{ + MaxPercentSend: threshold, + MaxPercentRecv: threshold, + }, + Flow: &types.Flow{ + Inflow: inflow, + Outflow: outflow, + ChannelValue: channelValue, + }, + }) +} + +func (s *KeeperTestSuite) TestSendRateLimitedPacket() { + // For send packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + amountToExceed := "5" + sequence := uint64(10) + + // Create rate limit (for SEND, use SOURCE channel) + s.createRateLimitCloseToQuota(denom, 
sourceChannel, types.PACKET_SEND) + + // This packet should cause an Outflow quota exceed error + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: amountToExceed}) + s.Require().NoError(err) + + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetNextSequenceSend(s.chainA.GetContext(), transferPort, sourceChannel, sequence) + // We check for a quota error because it doesn't appear until the end of the function + // We're avoiding checking for a success here because we can get a false positive if the rate limit doesn't exist + err = s.chainA.GetSimApp().RateLimitKeeper.SendRateLimitedPacket(s.chainA.GetContext(), transferPort, sourceChannel, clienttypes.Height{}, 0, packetData) + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error type") + s.Require().ErrorContains(err, "Outflow exceeds quota", "error text") + + // Reset the rate limit and try again + err = s.chainA.GetSimApp().RateLimitKeeper.ResetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().NoError(err, "no error expected when resetting rate limit") + + err = s.chainA.GetSimApp().RateLimitKeeper.SendRateLimitedPacket(s.chainA.GetContext(), transferPort, sourceChannel, clienttypes.Height{}, 0, packetData) + s.Require().NoError(err, "no error expected when sending packet after reset") + + // Check that the pending packet was stored + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), sourceChannel, sequence) + s.Require().True(found, "pending send packet") +} + +func (s *KeeperTestSuite) TestReceiveRateLimitedPacket() { + // For receive packets, the source will be the host and the destination will be stride + packetDenom := uosmo + sourceChannel := channelOnHost + destinationChannel := channelOnStride + amountToExceed := "5" + + // When the packet is received, the port and channel prefix will be added and the denom will be hashed + // before the rate limit is found from the store + rateLimitDenom := 
hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, channelOnStride, packetDenom)) + + // Create rate limit (for RECV, use DESTINATION channel) + s.createRateLimitCloseToQuota(rateLimitDenom, destinationChannel, types.PACKET_RECV) + + // This packet should cause an Outflow quota exceed error + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: packetDenom, Amount: amountToExceed}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + } + + // We check for a quota error because it doesn't appear until the end of the function + // We're avoiding checking for a success here because we can get a false positive if the rate limit doesn't exist + err = s.chainA.GetSimApp().RateLimitKeeper.ReceiveRateLimitedPacket(s.chainA.GetContext(), packet) + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error type") + s.Require().ErrorContains(err, "Inflow exceeds quota", "error text") +} + +func (s *KeeperTestSuite) TestAcknowledgeRateLimitedPacket_AckSuccess() { + // For ack packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + sequence := uint64(10) + + // Create rate limit - the flow and quota does not matter for this test + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + }) + + // Store the pending packet for this sequence number + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), sourceChannel, sequence) + + // Build the ack packet + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: "10"}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: 
sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + Sequence: sequence, + } + ackSuccess := transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Result{Result: []byte{1}}, + }) + + // Call AckPacket with the successful ack + err = s.chainA.GetSimApp().RateLimitKeeper.AcknowledgeRateLimitedPacket(s.chainA.GetContext(), packet, ackSuccess) + s.Require().NoError(err, "no error expected during AckPacket") + + // Confirm the pending packet was removed + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), sourceChannel, sequence) + s.Require().False(found, "send packet should have been removed") +} + +func (s *KeeperTestSuite) TestAcknowledgeRateLimitedPacket_AckFailure() { + // For ack packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + initialOutflow := sdkmath.NewInt(100) + packetAmount := sdkmath.NewInt(10) + sequence := uint64(10) + + // Create rate limit - only outflow is needed to this tests + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + Flow: &types.Flow{Outflow: initialOutflow}, + }) + + // Store the pending packet for this sequence number + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), sourceChannel, sequence) + + // Build the ack packet + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: packetAmount.String()}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + Sequence: sequence, + } + ackFailure := 
transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Error{Error: "error"}, + }) + + // Call OnTimeoutPacket with the failed ack + err = s.chainA.GetSimApp().RateLimitKeeper.AcknowledgeRateLimitedPacket(s.chainA.GetContext(), packet, ackFailure) + s.Require().NoError(err, "no error expected during AckPacket") + + // Confirm the pending packet was removed + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), sourceChannel, sequence) + s.Require().False(found, "send packet should have been removed") + + // Confirm the flow was adjusted + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, sourceChannel) + s.Require().True(found) + s.Require().Equal(initialOutflow.Sub(packetAmount).Int64(), rateLimit.Flow.Outflow.Int64(), "outflow") +} + +func (s *KeeperTestSuite) TestTimeoutRateLimitedPacket() { + // For timeout packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + initialOutflow := sdkmath.NewInt(100) + packetAmount := sdkmath.NewInt(10) + sequence := uint64(10) + + // Create rate limit - only outflow is needed to this tests + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + Flow: &types.Flow{Outflow: initialOutflow}, + }) + + // Store the pending packet for this sequence number + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), sourceChannel, sequence) + + // Build the timeout packet + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: packetAmount.String()}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + 
DestinationChannel: destinationChannel, + Data: packetData, + Sequence: sequence, + } + + // Call OnTimeoutPacket - the outflow should get decremented + err = s.chainA.GetSimApp().RateLimitKeeper.TimeoutRateLimitedPacket(s.chainA.GetContext(), packet) + s.Require().NoError(err, "no error expected when calling timeout packet") + + expectedOutflow := initialOutflow.Sub(packetAmount) + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(expectedOutflow.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow decremented") + + // Check that the pending packet has been removed + found = s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, sequence) + s.Require().False(found, "pending packet should have been removed") + + // Call OnTimeoutPacket again with a different sequence number + // (to simulate a timeout that arrived in a different quota window from where the send occurred) + // The outflow should not change + packet.Sequence-- + err = s.chainA.GetSimApp().RateLimitKeeper.TimeoutRateLimitedPacket(s.chainA.GetContext(), packet) + s.Require().NoError(err, "no error expected when calling timeout packet again") + + rateLimit, found = s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(expectedOutflow.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow should not have changed") +} + +// --- Middleware Tests --- + +// TestOnRecvPacket_Allowed tests the middleware's OnRecvPacket when the packet is allowed +func (s *KeeperTestSuite) TestOnRecvPacket_Allowed() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Define recipient and calculate expected voucher denom on chain B + recipientAddr := s.chainB.SenderAccount.GetAddress() + voucherDenomStr := hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, 
path.EndpointB.ChannelID, uosmo)) + + // Fund recipient account with native denom + fundAmount := sdkmath.NewInt(1000000) + bondDenom, err := s.chainB.GetSimApp().StakingKeeper.BondDenom(s.chainB.GetContext()) + s.Require().NoError(err, "getting bond denom failed") + fundCoins := sdk.NewCoins(sdk.NewCoin(bondDenom, fundAmount)) + // Mint native denom to transfer module + err = s.chainB.GetSimApp().BankKeeper.MintCoins(s.chainB.GetContext(), transfertypes.ModuleName, fundCoins) + s.Require().NoError(err, "minting native denom coins to transfer module failed") + // Send native denom from transfer module to recipient + err = s.chainB.GetSimApp().BankKeeper.SendCoinsFromModuleToAccount(s.chainB.GetContext(), transfertypes.ModuleName, recipientAddr, fundCoins) + s.Require().NoError(err, "funding recipient account with native denom failed") + + // Create the test packet data + testAmountStr := "10" + testAmountInt, _ := sdkmath.NewIntFromString(testAmountStr) + packetDataBz, err := json.Marshal(transfertypes.FungibleTokenPacketData{ + Denom: uosmo, + Amount: testAmountStr, + Sender: s.chainA.SenderAccount.GetAddress().String(), + Receiver: recipientAddr.String(), + }) + s.Require().NoError(err) + + // Set the rate limit using the voucher denom string + simulatedSupply := sdkmath.NewInt(1000) // Keep simulated supply for rate limit calculation + s.chainB.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainB.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: voucherDenomStr, ChannelOrClientId: path.EndpointB.ChannelID}, + Quota: &types.Quota{MaxPercentRecv: sdkmath.NewInt(100), DurationHours: 1}, // High quota + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: simulatedSupply}, + }) + + timeoutTS := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + // Commit the packet on chain A so that RelayPacket can find the commitment + seq, err := path.EndpointA.SendPacket(clienttypes.ZeroHeight(), timeoutTS, packetDataBz) + 
s.Require().NoError(err, "sending packet on chain A failed") + + packet := channeltypes.Packet{ + Sequence: seq, + SourcePort: path.EndpointA.ChannelConfig.PortID, + SourceChannel: path.EndpointA.ChannelID, + DestinationPort: path.EndpointB.ChannelConfig.PortID, + DestinationChannel: path.EndpointB.ChannelID, + Data: packetDataBz, + TimeoutHeight: clienttypes.ZeroHeight(), + TimeoutTimestamp: timeoutTS, + } + + // Relay the packet. This will call OnRecvPacket on chain B through the integrated middleware stack. + err = path.RelayPacket(packet) + s.Require().NoError(err, "relaying packet failed") + + // Check acknowledgement on chain B + ackBz, found := s.chainB.GetSimApp().IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + s.Require().True(found, "acknowledgement not found") + s.Require().NotNil(ackBz, "ack should not be nil") + + expectedAck := channeltypes.NewResultAcknowledgement([]byte{1}) + expBz := channeltypes.CommitAcknowledgement(expectedAck.Acknowledgement()) + s.Require().Equal(expBz, ackBz) + + // Check flow was updated + rateLimit, found := s.chainB.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainB.GetContext(), voucherDenomStr, path.EndpointB.ChannelID) + s.Require().True(found) + s.Require().Equal(testAmountInt.Int64(), rateLimit.Flow.Inflow.Int64(), "inflow should be updated") +} + +// TestOnRecvPacket_Denied tests the middleware's OnRecvPacket when the packet is denied +func (s *KeeperTestSuite) TestOnRecvPacket_Denied() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Create rate limit with zero quota for recv + rateLimitDenom := hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, path.EndpointB.ChannelID, sdk.DefaultBondDenom)) + s.chainB.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainB.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: rateLimitDenom, ChannelOrClientId: path.EndpointB.ChannelID}, + Quota: 
&types.Quota{MaxPercentRecv: sdkmath.ZeroInt(), DurationHours: 1}, // Zero quota
+		Flow:  &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)},
+	})
+
+	sender := s.chainA.SenderAccount.GetAddress()
+	receiver := s.chainB.SenderAccount.GetAddress()
+	sendCoin := ibctesting.TestCoin
+
+	// Create packet data
+	packetDataBz, err := json.Marshal(transfertypes.FungibleTokenPacketData{
+		Denom:    sendCoin.Denom,
+		Amount:   sendCoin.Amount.String(),
+		Sender:   sender.String(),
+		Receiver: receiver.String(),
+	})
+	s.Require().NoError(err)
+
+	timeoutTS := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano())
+	timeoutHeight := clienttypes.ZeroHeight()
+	sourcePort := path.EndpointA.ChannelConfig.PortID
+	sourceChannel := path.EndpointA.ChannelID
+	// Record the sender's starting balance so the refund can be verified after the relay
+	senderInitialBal := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), sender, sdk.DefaultBondDenom)
+
+	// Commit the packet on chain A so that RelayPacket can find the commitment
+	transferMsg := transfertypes.NewMsgTransfer(sourcePort, sourceChannel, sendCoin, sender.String(), receiver.String(), timeoutHeight, timeoutTS, "")
+	resp, err := s.chainA.GetSimApp().TransferKeeper.Transfer(s.chainA.GetContext(), transferMsg)
+	s.Require().NoError(err)
+
+	// After sending the transfer, "sendCoin" should be taken from the sender to escrow.
+	senderIntermedBal := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), sender, sdk.DefaultBondDenom)
+	s.Require().Equal(senderInitialBal.Sub(sendCoin), senderIntermedBal)
+
+	// Manually commit block on Chain A
+	s.coordinator.CommitBlock(s.chainA)
+
+	packet := channeltypes.Packet{
+		Sequence:           resp.Sequence,
+		SourcePort:         sourcePort,
+		SourceChannel:      sourceChannel,
+		DestinationPort:    path.EndpointB.ChannelConfig.PortID,
+		DestinationChannel: path.EndpointB.ChannelID,
+		Data:               packetDataBz,
+		TimeoutHeight:      timeoutHeight,
+		TimeoutTimestamp:   timeoutTS,
+	}
+
+	// Relay the packet. 
This will call OnRecvPacket on chain B through the integrated middleware stack. + err = path.RelayPacket(packet) + s.Require().NoError(err, "relaying packet failed") + + // Check acknowledgement on chain B + ackBytes, found := s.chainB.GetSimApp().IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + s.Require().True(found, "acknowledgement not found") + s.Require().NotNil(ackBytes, "ack bytes should not be nil") + + expectedAck := channeltypes.NewErrorAcknowledgement(types.ErrQuotaExceeded) + expBz := channeltypes.CommitAcknowledgement(expectedAck.Acknowledgement()) + s.Require().Equal(expBz, ackBytes) + + // Check flow was NOT updated + rateLimit, found := s.chainB.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainB.GetContext(), rateLimitDenom, path.EndpointB.ChannelID) + s.Require().True(found) + s.Require().True(rateLimit.Flow.Inflow.IsZero(), "inflow should NOT be updated") + + // Sender should be refunded + senderEndBal := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), sender, sdk.DefaultBondDenom) + s.Require().Equal(senderInitialBal, senderEndBal) +} + +// TestSendPacket_Allowed tests the middleware's SendPacket when the packet is allowed by directly calling the middleware +func (s *KeeperTestSuite) TestSendPacket_Allowed() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Create rate limit with sufficient quota + rateLimitDenom := ustrd // Native denom + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: rateLimitDenom, ChannelOrClientId: path.EndpointA.ChannelID}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(100), DurationHours: 1}, // High quota + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)}, + }) + + timeoutTimestamp := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + 
amount := sdkmath.NewInt(10) + + // Create packet data + packetData := transfertypes.FungibleTokenPacketData{ + Denom: ustrd, + Amount: amount.String(), + Sender: s.chainA.SenderAccount.GetAddress().String(), + Receiver: s.chainB.SenderAccount.GetAddress().String(), + Memo: "", + } + packetDataBz, err := json.Marshal(packetData) + s.Require().NoError(err) + + // Get the middleware instance (assuming it's accessible via SimApp - needs verification) + // We need the transfer keeper's ICS4Wrapper which *is* the packet forward middleware's keeper + shouldPFM, ok := s.chainA.GetSimApp().TransferKeeper.GetICS4Wrapper().(*packerforwardkeeper.Keeper) + s.Require().Truef(ok, "Transfer keeper's ICS4Wrapper should be the PacketForward Middleware. Found %T", shouldPFM) + + // We need the transfer keeper's ICS4Wrapper which *is* the ratelimiting middleware + middleware, ok := s.chainA.GetSimApp().PFMKeeper.ICS4Wrapper().(ratelimiting.IBCMiddleware) + s.Require().Truef(ok, "PFM keeper's ICS4Wrapper should be the PacketForward Middleware. 
Found %T", middleware) + + // Directly call the middleware's SendPacket + seq, err := middleware.SendPacket( + s.chainA.GetContext(), + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + clienttypes.ZeroHeight(), // timeout height + timeoutTimestamp, + packetDataBz, + ) + + // Assert SendPacket succeeded + s.Require().NoError(err, "middleware.SendPacket should succeed") + s.Require().Equal(uint64(1), seq, "sequence should be 1") + + // Commit block and update context to ensure state updates are visible + s.coordinator.CommitBlock(s.chainA) + ctx := s.chainA.GetContext() // Get the latest context after commit + + // Check flow was updated using the latest context + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(ctx, rateLimitDenom, path.EndpointA.ChannelID) + s.Require().True(found) + s.Require().Equal(amount.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow should be updated") + + // Check pending packet was stored using the latest context + found = s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(ctx, path.EndpointA.ChannelID, seq) + s.Require().True(found, "pending packet should be stored") +} + +// TestSendPacket_Denied tests the middleware's SendPacket when the packet is denied by directly calling the middleware +func (s *KeeperTestSuite) TestSendPacket_Denied() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Create rate limit with a tiny quota that will be exceeded + rateLimitDenom := ustrd // Native denom + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: rateLimitDenom, ChannelOrClientId: path.EndpointA.ChannelID}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(1), DurationHours: 1}, // Set quota to 1% (will allow < 10 with ChannelValue 1000) + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)}, + }) + + timeoutTimestamp := 
uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + amount := sdkmath.NewInt(11) // amount 11 will exceed 1% of 1000 (threshold is 10, check is GT) + + // Create packet data + packetData := transfertypes.FungibleTokenPacketData{ + Denom: ustrd, + Amount: amount.String(), + Sender: s.chainA.SenderAccount.GetAddress().String(), + Receiver: s.chainB.SenderAccount.GetAddress().String(), + Memo: "", + } + packetDataBz, err := json.Marshal(packetData) + s.Require().NoError(err) + + // Get the middleware instance + middleware, ok := s.chainA.GetSimApp().PFMKeeper.ICS4Wrapper().(ratelimiting.IBCMiddleware) + s.Require().Truef(ok, "Packet forward middleware keeper's ICS4Wrapper should be the RateLimit middleware. Found: %T", middleware) + + // Directly call the middleware's SendPacket + _, err = middleware.SendPacket( + s.chainA.GetContext(), + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + clienttypes.ZeroHeight(), // timeout height + timeoutTimestamp, + packetDataBz, + ) + + // Check error is quota exceeded + s.Require().Error(err, "middleware.SendPacket should fail") + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error should be quota exceeded") + + // Commit block and update context + s.coordinator.CommitBlock(s.chainA) + ctx := s.chainA.GetContext() // Get latest context + + // Check flow was NOT updated + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(ctx, rateLimitDenom, path.EndpointA.ChannelID) + s.Require().True(found) + s.Require().True(rateLimit.Flow.Outflow.IsZero(), "outflow should NOT be updated") +} diff --git a/modules/apps/rate-limiting/keeper/pending_send.go b/modules/apps/rate-limiting/keeper/pending_send.go new file mode 100644 index 00000000000..75cbed30b35 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/pending_send.go @@ -0,0 +1,80 @@ +package keeper + +import ( + "encoding/binary" + "fmt" + "strings" + + "cosmossdk.io/store/prefix" + storetypes "cosmossdk.io/store/types" + + 
"github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Sets the sequence number of a packet that was just sent +func (k Keeper) SetPendingSendPacket(ctx sdk.Context, channelID string, sequence uint64) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + key := types.PendingSendPacketKey(channelID, sequence) + store.Set(key, []byte{1}) +} + +// Remove a pending packet sequence number from the store +// Used after the ack or timeout for a packet has been received +func (k Keeper) RemovePendingSendPacket(ctx sdk.Context, channelID string, sequence uint64) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + key := types.PendingSendPacketKey(channelID, sequence) + store.Delete(key) +} + +// Checks whether the packet sequence number is in the store - indicating that it was +// sent during the current quota +func (k Keeper) CheckPacketSentDuringCurrentQuota(ctx sdk.Context, channelID string, sequence uint64) bool { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + key := types.PendingSendPacketKey(channelID, sequence) + valueBz := store.Get(key) + found := len(valueBz) != 0 + return found +} + +// Get all pending packet sequence numbers +func (k Keeper) GetAllPendingSendPackets(ctx sdk.Context) []string { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + pendingPackets := make([]string, 0) + for ; iterator.Valid(); iterator.Next() { + key := iterator.Key() + + channelID := string(key[:types.PendingSendPacketChannelLength]) + channelID = 
strings.TrimRight(channelID, "\x00") // removes null bytes from suffix + sequence := binary.BigEndian.Uint64(key[types.PendingSendPacketChannelLength:]) + + packetID := fmt.Sprintf("%s/%d", channelID, sequence) + pendingPackets = append(pendingPackets, packetID) + } + + return pendingPackets +} + +// Remove all pending sequence numbers from the store +// This is executed when the quota resets +func (k Keeper) RemoveAllChannelPendingSendPackets(ctx sdk.Context, channelID string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + + iterator := storetypes.KVStorePrefixIterator(store, []byte(channelID)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } +} diff --git a/modules/apps/rate-limiting/keeper/pending_send_test.go b/modules/apps/rate-limiting/keeper/pending_send_test.go new file mode 100644 index 00000000000..03ce2068ae0 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/pending_send_test.go @@ -0,0 +1,40 @@ +package keeper_test + +import "fmt" + +func (s *KeeperTestSuite) TestPendingSendPacketPrefix() { + // Store 5 packets across two channels + sendPackets := []string{} + for _, channelID := range []string{"channel-0", "channel-1"} { + for sequence := uint64(0); sequence < 5; sequence++ { + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), channelID, sequence) + sendPackets = append(sendPackets, fmt.Sprintf("%s/%d", channelID, sequence)) + } + } + + // Check that they each sequence number is found + for _, channelID := range []string{"channel-0", "channel-1"} { + for sequence := uint64(0); sequence < 5; sequence++ { + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, sequence) + s.Require().True(found, "send packet should have been found - channel %s, sequence: %d", channelID, sequence) + } + } + + // Check lookup of all 
sequence numbers + actualSendPackets := s.chainA.GetSimApp().RateLimitKeeper.GetAllPendingSendPackets(s.chainA.GetContext()) + s.Require().Equal(sendPackets, actualSendPackets, "all send packets") + + // Remove 0 sequence numbers and all sequence numbers from channel-0 + s.chainA.GetSimApp().RateLimitKeeper.RemovePendingSendPacket(s.chainA.GetContext(), "channel-0", 0) + s.chainA.GetSimApp().RateLimitKeeper.RemovePendingSendPacket(s.chainA.GetContext(), "channel-1", 0) + s.chainA.GetSimApp().RateLimitKeeper.RemoveAllChannelPendingSendPackets(s.chainA.GetContext(), "channel-0") + + // Check that only the remaining sequences are found + for _, channelID := range []string{"channel-0", "channel-1"} { + for sequence := uint64(0); sequence < 5; sequence++ { + expected := (channelID == "channel-1") && (sequence != 0) + actual := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, sequence) + s.Require().Equal(expected, actual, "send packet after removal - channel: %s, sequence: %d", channelID, sequence) + } + } +} diff --git a/modules/apps/rate-limiting/keeper/rate_limit.go b/modules/apps/rate-limiting/keeper/rate_limit.go new file mode 100644 index 00000000000..139c699c613 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/rate_limit.go @@ -0,0 +1,174 @@ +package keeper + +import ( + sdkmath "cosmossdk.io/math" + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +// Stores/Updates a rate limit object in the store +func (k Keeper) SetRateLimit(ctx sdk.Context, rateLimit types.RateLimit) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.RateLimitKeyPrefix) + + rateLimitKey := 
types.RateLimitItemKey(rateLimit.Path.Denom, rateLimit.Path.ChannelOrClientId)
+	rateLimitValue := k.cdc.MustMarshal(&rateLimit)
+
+	store.Set(rateLimitKey, rateLimitValue)
+}
+
+// RemoveRateLimit removes a rate limit object from the store using denom and channel-id.
+func (k Keeper) RemoveRateLimit(ctx sdk.Context, denom string, channelID string) {
+	adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
+	store := prefix.NewStore(adapter, types.RateLimitKeyPrefix)
+	rateLimitKey := types.RateLimitItemKey(denom, channelID)
+	store.Delete(rateLimitKey)
+}
+
+// GetRateLimit grabs and returns a rate limit object from the store using denom and channel-id.
+func (k Keeper) GetRateLimit(ctx sdk.Context, denom string, channelID string) (rateLimit types.RateLimit, found bool) {
+	adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
+	store := prefix.NewStore(adapter, types.RateLimitKeyPrefix)
+
+	rateLimitKey := types.RateLimitItemKey(denom, channelID)
+	rateLimitValue := store.Get(rateLimitKey)
+
+	if len(rateLimitValue) == 0 {
+		return rateLimit, false
+	}
+
+	k.cdc.MustUnmarshal(rateLimitValue, &rateLimit)
+	return rateLimit, true
+}
+
+// GetAllRateLimits returns all rate limits stored.
+func (k Keeper) GetAllRateLimits(ctx sdk.Context) []types.RateLimit {
+	adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx))
+	store := prefix.NewStore(adapter, types.RateLimitKeyPrefix)
+
+	iterator := store.Iterator(nil, nil)
+	defer iterator.Close()
+
+	allRateLimits := []types.RateLimit{}
+	for ; iterator.Valid(); iterator.Next() {
+		rateLimit := types.RateLimit{}
+		if err := k.cdc.Unmarshal(iterator.Value(), &rateLimit); err != nil {
+			// Log the error and skip this entry if unmarshalling fails
+			k.Logger(ctx).Error("failed to unmarshal rate limit", "key", string(iterator.Key()), "error", err)
+			continue
+		}
+		allRateLimits = append(allRateLimits, rateLimit)
+	}
+
+	return allRateLimits
+}
+
+// AddRateLimit adds a new rate limit. Fails if the rate limit already exists or the channel value is 0.
+func (k Keeper) AddRateLimit(ctx sdk.Context, msg *types.MsgAddRateLimit) error {
+	channelValue := k.GetChannelValue(ctx, msg.Denom)
+	if channelValue.IsZero() {
+		return types.ErrZeroChannelValue
+	}
+
+	_, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId)
+	if found {
+		return types.ErrRateLimitAlreadyExists
+	}
+
+	// Confirm the channel or client exists
+	_, found = k.channelKeeper.GetChannel(ctx, transfertypes.PortID, msg.ChannelOrClientId)
+	if !found {
+		// Check if the channelId is actually a clientId
+		status := k.clientKeeper.GetClientStatus(ctx, msg.ChannelOrClientId)
+		// If the status is Unauthorized or Unknown, it means the client doesn't exist or is invalid
+		if status == ibcexported.Unknown || status == ibcexported.Unauthorized {
+			// Return specific error indicating neither channel nor client was found
+			return types.ErrChannelNotFound
+		}
+		// If status is Active, Expired, or Frozen, the client exists, proceed.
+	}
+
+	// Create and store the rate limit object
+	path := types.Path{
+		Denom:             msg.Denom,
+		ChannelOrClientId: msg.ChannelOrClientId,
+	}
+	quota := types.Quota{
+		MaxPercentSend: msg.MaxPercentSend,
+		MaxPercentRecv: msg.MaxPercentRecv,
+		DurationHours:  msg.DurationHours,
+	}
+	flow := types.Flow{
+		Inflow:       sdkmath.ZeroInt(),
+		Outflow:      sdkmath.ZeroInt(),
+		ChannelValue: channelValue,
+	}
+
+	k.SetRateLimit(ctx, types.RateLimit{
+		Path:  &path,
+		Quota: &quota,
+		Flow:  &flow,
+	})
+
+	return nil
+}
+
+// UpdateRateLimit updates an existing rate limit. Fails if the rate limit doesn't exist.
+func (k Keeper) UpdateRateLimit(ctx sdk.Context, msg *types.MsgUpdateRateLimit) error {
+	_, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId)
+	if !found {
+		return types.ErrRateLimitNotFound
+	}
+
+	// Update the rate limit object with the new quota information
+	// The flow should also get reset to 0
+	path := types.Path{
+		Denom:             msg.Denom,
+		ChannelOrClientId: msg.ChannelOrClientId,
+	}
+	quota := types.Quota{
+		MaxPercentSend: msg.MaxPercentSend,
+		MaxPercentRecv: msg.MaxPercentRecv,
+		DurationHours:  msg.DurationHours,
+	}
+	flow := types.Flow{
+		Inflow:       sdkmath.ZeroInt(),
+		Outflow:      sdkmath.ZeroInt(),
+		ChannelValue: k.GetChannelValue(ctx, msg.Denom),
+	}
+
+	k.SetRateLimit(ctx, types.RateLimit{
+		Path:  &path,
+		Quota: &quota,
+		Flow:  &flow,
+	})
+
+	return nil
+}
+
+// ResetRateLimit resets the rate limit after expiration.
+// The inflow and outflow should get reset to 0, the channelValue should be updated,
+// and all pending send packet sequence numbers should be removed.
+func (k Keeper) ResetRateLimit(ctx sdk.Context, denom string, channelID string) error {
+	rateLimit, found := k.GetRateLimit(ctx, denom, channelID)
+	if !found {
+		return types.ErrRateLimitNotFound
+	}
+
+	flow := types.Flow{
+		Inflow:       sdkmath.ZeroInt(),
+		Outflow:      sdkmath.ZeroInt(),
+		ChannelValue: k.GetChannelValue(ctx, denom),
+	}
+	rateLimit.Flow = &flow
+
+	k.SetRateLimit(ctx, rateLimit)
+	k.RemoveAllChannelPendingSendPackets(ctx, channelID)
+	return nil
+}
diff --git a/modules/apps/rate-limiting/keeper/rate_limit_test.go b/modules/apps/rate-limiting/keeper/rate_limit_test.go
new file mode 100644
index 00000000000..cc5bd6379fa
--- /dev/null
+++ b/modules/apps/rate-limiting/keeper/rate_limit_test.go
@@ -0,0 +1,131 @@
+package keeper_test
+
+import (
+	"strconv"
+
+	sdkmath "cosmossdk.io/math"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
+	transfertypes 
"github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +const ( + denom = "denom" + channelID = "channel-0" + sender = "sender" + receiver = "receiver" +) + +// Helper function to create 5 rate limit objects with various attributes +func (s *KeeperTestSuite) createRateLimits() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := 1; i <= 5; i++ { + suffix := strconv.Itoa(i) + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom-" + suffix, ChannelOrClientId: "channel-" + suffix}, + Flow: &types.Flow{Inflow: sdkmath.NewInt(10), Outflow: sdkmath.NewInt(10)}, + } + + rateLimits = append(rateLimits, rateLimit) + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestGetRateLimit() { + rateLimits := s.createRateLimits() + + expectedRateLimit := rateLimits[0] + denom := expectedRateLimit.Path.Denom + channelID := expectedRateLimit.Path.ChannelOrClientId + + actualRateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found, "element should have been found, but was not") + s.Require().Equal(expectedRateLimit, actualRateLimit) +} + +func (s *KeeperTestSuite) TestRemoveRateLimit() { + rateLimits := s.createRateLimits() + + rateLimitToRemove := rateLimits[0] + denomToRemove := rateLimitToRemove.Path.Denom + channelIDToRemove := rateLimitToRemove.Path.ChannelOrClientId + + s.chainA.GetSimApp().RateLimitKeeper.RemoveRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + s.Require().False(found, "the removed element should not have been found, but it was") +} + +func (s *KeeperTestSuite) TestResetRateLimit() { + rateLimits := s.createRateLimits() + + rateLimitToReset := rateLimits[0] + denomToRemove 
:= rateLimitToReset.Path.Denom + channelIDToRemove := rateLimitToReset.Path.ChannelOrClientId + + err := s.chainA.GetSimApp().RateLimitKeeper.ResetRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + s.Require().NoError(err) + + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + s.Require().True(found, "element should have been found, but was not") + s.Require().Zero(rateLimit.Flow.Inflow.Int64(), "Inflow should have been reset to 0") + s.Require().Zero(rateLimit.Flow.Outflow.Int64(), "Outflow should have been reset to 0") +} + +func (s *KeeperTestSuite) TestGetAllRateLimits() { + expectedRateLimits := s.createRateLimits() + actualRateLimits := s.chainA.GetSimApp().RateLimitKeeper.GetAllRateLimits(s.chainA.GetContext()) + s.Require().Len(actualRateLimits, len(expectedRateLimits)) + s.Require().ElementsMatch(expectedRateLimits, actualRateLimits, "all rate limits") +} + +func (s *KeeperTestSuite) TestAddRateLimit_ClientId() { + // Setup client between chain A and chain B + path := ibctesting.NewPath(s.chainA, s.chainB) + s.coordinator.SetupClients(path) + clientID := path.EndpointA.ClientID + + // Mock GetChannelValue to return non-zero + // Note: This might require adjusting the test suite setup if GetChannelValue isn't easily mockable. + // For now, assume it works or the underlying bank keeper has supply. + // A more robust test might involve actually sending tokens. 
+ // Mint some tokens for the denom to ensure channel value is non-zero + mintAmount := sdkmath.NewInt(1000) + mintCoins := sdk.NewCoins(sdk.NewCoin("clientdenom", mintAmount)) + // Revert: Mint back to the transfer module account + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), transfertypes.ModuleName, mintCoins) + s.Require().NoError(err, "minting coins failed") + + msg := &types.MsgAddRateLimit{ + Signer: s.chainA.GetSimApp().RateLimitKeeper.GetAuthority(), + Denom: "clientdenom", + ChannelOrClientId: clientID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + } + + // Add the rate limit using the client ID + err = s.chainA.GetSimApp().RateLimitKeeper.AddRateLimit(s.chainA.GetContext(), msg) + s.Require().NoError(err, "adding rate limit with client ID should succeed") + + // Verify the rate limit was stored correctly + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), msg.Denom, clientID) + s.Require().True(found, "rate limit added with client ID should be found") + + // Test adding with an invalid ID (neither channel nor client) + invalidID := "invalid-id" + msgInvalid := &types.MsgAddRateLimit{ + Signer: s.chainA.GetSimApp().RateLimitKeeper.GetAuthority(), + Denom: "clientdenom", + ChannelOrClientId: invalidID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + } + err = s.chainA.GetSimApp().RateLimitKeeper.AddRateLimit(s.chainA.GetContext(), msgInvalid) + s.Require().ErrorIs(err, types.ErrChannelNotFound, "adding rate limit with invalid ID should fail") +} diff --git a/modules/apps/rate-limiting/keeper/whitelist.go b/modules/apps/rate-limiting/keeper/whitelist.go new file mode 100644 index 00000000000..c9980f3d6ea --- /dev/null +++ b/modules/apps/rate-limiting/keeper/whitelist.go @@ -0,0 +1,58 @@ +package keeper + +import ( + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk 
"github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Adds an pair of sender and receiver addresses to the whitelist to allow all +// IBC transfers between those addresses to skip all flow calculations +func (k Keeper) SetWhitelistedAddressPair(ctx sdk.Context, whitelist types.WhitelistedAddressPair) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + key := types.AddressWhitelistKey(whitelist.Sender, whitelist.Receiver) + value := k.cdc.MustMarshal(&whitelist) + store.Set(key, value) +} + +// Removes a whitelisted address pair so that it's transfers are counted in the quota +func (k Keeper) RemoveWhitelistedAddressPair(ctx sdk.Context, sender, receiver string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + key := types.AddressWhitelistKey(sender, receiver) + store.Delete(key) +} + +// Check if a sender/receiver address pair is currently whitelisted +func (k Keeper) IsAddressPairWhitelisted(ctx sdk.Context, sender, receiver string) bool { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + + key := types.AddressWhitelistKey(sender, receiver) + value := store.Get(key) + found := len(value) != 0 + + return found +} + +// Get all the whitelisted addresses +func (k Keeper) GetAllWhitelistedAddressPairs(ctx sdk.Context) []types.WhitelistedAddressPair { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allWhitelistedAddresses := []types.WhitelistedAddressPair{} + for ; iterator.Valid(); iterator.Next() { + whitelist := types.WhitelistedAddressPair{} + k.cdc.MustUnmarshal(iterator.Value(), 
&whitelist)
		allWhitelistedAddresses = append(allWhitelistedAddresses, whitelist)
	}

	return allWhitelistedAddresses
}
diff --git a/modules/apps/rate-limiting/keeper/whitelist_test.go b/modules/apps/rate-limiting/keeper/whitelist_test.go
new file mode 100644
index 00000000000..7bf24b0a2be
--- /dev/null
+++ b/modules/apps/rate-limiting/keeper/whitelist_test.go
package keeper_test

import (
	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// TestAddressWhitelist exercises the full whitelist lifecycle:
// set, exact-pair lookup, get-all, and removal.
func (s *KeeperTestSuite) TestAddressWhitelist() {
	// Store addresses in the whitelist.
	expectedWhitelist := []types.WhitelistedAddressPair{
		{Sender: "sender-1", Receiver: "receiver-1"},
		{Sender: "sender-2", Receiver: "receiver-2"},
		{Sender: "sender-3", Receiver: "receiver-3"},
	}
	for _, addressPair := range expectedWhitelist {
		s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), addressPair)
	}

	// Confirm that each stored pair was found.
	for _, addressPair := range expectedWhitelist {
		found := s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), addressPair.Sender, addressPair.Receiver)
		s.Require().True(found, "address pair should have been whitelisted (%s/%s)",
			addressPair.Sender, addressPair.Receiver)
	}

	// Confirm that both the sender and receiver must match for a pair to be whitelisted.
	for _, addressPair := range expectedWhitelist {
		found := s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), addressPair.Sender, "fake-receiver")
		s.Require().False(found, "address pair should not have been whitelisted (%s/%s)",
			addressPair.Sender, "fake-receiver")

		found = s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), "fake-sender", addressPair.Receiver)
		s.Require().False(found, "address pair should not have been whitelisted (%s/%s)",
			"fake-sender", addressPair.Receiver)
	}

	// Check GetAll returns every stored pair.
	actualWhitelist := s.chainA.GetSimApp().RateLimitKeeper.GetAllWhitelistedAddressPairs(s.chainA.GetContext())
	s.Require().Equal(expectedWhitelist, actualWhitelist, "whitelist get all")

	// Finally, remove each pair from the whitelist.
	for _, addressPair := range expectedWhitelist {
		s.chainA.GetSimApp().RateLimitKeeper.RemoveWhitelistedAddressPair(s.chainA.GetContext(), addressPair.Sender, addressPair.Receiver)
	}

	// Confirm there are no longer any whitelisted pairs.
	actualWhitelist = s.chainA.GetSimApp().RateLimitKeeper.GetAllWhitelistedAddressPairs(s.chainA.GetContext())
	s.Require().Empty(actualWhitelist, "whitelist should have been cleared")

	for _, addressPair := range expectedWhitelist {
		found := s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), addressPair.Sender, addressPair.Receiver)
		s.Require().False(found, "address pair should no longer be whitelisted (%s/%s)",
			addressPair.Sender, addressPair.Receiver)
	}
}
diff --git a/modules/apps/rate-limiting/module.go b/modules/apps/rate-limiting/module.go
new file mode 100644
index 00000000000..5834ad09b07
--- /dev/null
+++ b/modules/apps/rate-limiting/module.go
package ratelimiting

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/spf13/cobra"

	"cosmossdk.io/core/appmodule"

	"github.com/cosmos/cosmos-sdk/client"
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/module"

	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/client/cli"
	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper"
	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// Compile-time interface assertions for AppModule / AppModuleBasic.
var (
	_ module.AppModule = (*AppModule)(nil)
	_ module.AppModuleBasic = (*AppModuleBasic)(nil)
	_ module.HasGenesis = (*AppModule)(nil)
	_ module.HasName = (*AppModule)(nil)
	_
module.HasConsensusVersion = (*AppModule)(nil)
	_ module.HasServices = (*AppModule)(nil)
	_ appmodule.AppModule = (*AppModule)(nil)
	_ appmodule.HasBeginBlocker = (*AppModule)(nil)

	// Note: IBCMiddleware implements porttypes.Middleware and porttypes.ICS4Wrapper
)

// AppModuleBasic is the rate-limiting AppModuleBasic.
type AppModuleBasic struct{}

// Name implements the AppModuleBasic interface, returning the module name.
func (AppModuleBasic) Name() string {
	return types.ModuleName
}

// IsOnePerModuleType implements the depinject.OnePerModuleType interface.
func (AppModule) IsOnePerModuleType() {}

// IsAppModule implements the appmodule.AppModule interface.
func (AppModule) IsAppModule() {}

// RegisterLegacyAminoCodec implements the AppModuleBasic interface, delegating
// to the types package registration.
func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
	types.RegisterLegacyAminoCodec(cdc)
}

// RegisterInterfaces registers module concrete types into protobuf Any.
func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {
	types.RegisterInterfaces(registry)
}

// DefaultGenesis returns default genesis state as raw bytes for the rate-limiting
// module.
func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
	return cdc.MustMarshalJSON(types.DefaultGenesis())
}

// ValidateGenesis performs genesis state validation for the rate-limiting module.
// It unmarshals the raw JSON state and delegates to GenesisState.Validate.
func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error {
	var gs types.GenesisState
	if err := cdc.UnmarshalJSON(bz, &gs); err != nil {
		return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
	}

	return gs.Validate()
}

// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the rate-limiting module.
func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) {
	// Panic is acceptable here: this runs once at process start-up, before serving.
	if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil {
		panic(err)
	}
}

// GetTxCmd implements the AppModuleBasic interface; the module exposes no tx commands.
func (AppModuleBasic) GetTxCmd() *cobra.Command {
	return nil
}

// GetQueryCmd implements the AppModuleBasic interface, returning the module query commands.
func (AppModuleBasic) GetQueryCmd() *cobra.Command {
	return cli.GetQueryCmd()
}

// AppModule represents the AppModule for this module.
type AppModule struct {
	AppModuleBasic
	keeper keeper.Keeper
}

// NewAppModule creates a new rate-limiting module backed by the given keeper.
func NewAppModule(k keeper.Keeper) AppModule {
	return AppModule{
		keeper: k,
	}
}

// RegisterServices registers module Msg and Query services.
func (am AppModule) RegisterServices(cfg module.Configurator) {
	types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) // Use the msgServer implementation
	types.RegisterQueryServer(cfg.QueryServer(), am.keeper)
}

// InitGenesis performs genesis initialization for the rate-limiting module. It returns
// no validator updates.
func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) {
	var genesisState types.GenesisState
	cdc.MustUnmarshalJSON(data, &genesisState)
	am.keeper.InitGenesis(ctx, genesisState)
}

// ExportGenesis returns the exported genesis state as raw bytes for the rate-limiting
// module.
func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
	gs := am.keeper.ExportGenesis(ctx)
	return cdc.MustMarshalJSON(gs)
}

// ConsensusVersion implements AppModule/ConsensusVersion defining the current version of rate-limiting.
func (AppModule) ConsensusVersion() uint64 { return 1 }

// BeginBlock implements the AppModule interface, delegating to the keeper's
// per-block hook (hour-epoch handling).
func (am AppModule) BeginBlock(ctx context.Context) error {
	sdkCtx := sdk.UnwrapSDKContext(ctx)
	am.keeper.BeginBlocker(sdkCtx)
	// we do not want to raise an error in block processing if rate limit reset fails
	return nil
}
diff --git a/modules/apps/rate-limiting/types/codec.go b/modules/apps/rate-limiting/types/codec.go
new file mode 100644
index 00000000000..7f9d7959e9a
--- /dev/null
+++ b/modules/apps/rate-limiting/types/codec.go
package types

import (
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/msgservice"
)

// RegisterLegacyAminoCodec registers the necessary rate-limiting interfaces and concrete types
// on the provided LegacyAmino codec. These types are used for Amino JSON serialization.
func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
	cdc.RegisterConcrete(&MsgAddRateLimit{}, "ratelimit/MsgAddRateLimit", nil)
	cdc.RegisterConcrete(&MsgUpdateRateLimit{}, "ratelimit/MsgUpdateRateLimit", nil)
	cdc.RegisterConcrete(&MsgRemoveRateLimit{}, "ratelimit/MsgRemoveRateLimit", nil)
	cdc.RegisterConcrete(&MsgResetRateLimit{}, "ratelimit/MsgResetRateLimit", nil)
}

// RegisterInterfaces registers the rate-limiting interfaces types with the interface registry.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
	registry.RegisterImplementations(
		(*sdk.Msg)(nil),
		&MsgAddRateLimit{},
		&MsgUpdateRateLimit{},
		&MsgRemoveRateLimit{},
		&MsgResetRateLimit{},
	)

	msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
}

// ModuleCdc references the global rate-limiting module codec. Note, the codec should
// ONLY be used in certain instances of tests and for JSON encoding.
var ModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
diff --git a/modules/apps/rate-limiting/types/codec_test.go b/modules/apps/rate-limiting/types/codec_test.go
new file mode 100644
index 00000000000..9fefd780e59
--- /dev/null
+++ b/modules/apps/rate-limiting/types/codec_test.go
package types_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	sdk "github.com/cosmos/cosmos-sdk/types"
	moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil"

	ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting"
	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// TestCodecTypeRegistration verifies that every rate-limiting Msg type URL
// resolves against the module's interface registry, and that an unknown URL
// fails to resolve.
func TestCodecTypeRegistration(t *testing.T) {
	testCases := []struct {
		name    string
		typeURL string
		errMsg  string
	}{
		{
			"success: MsgAddRateLimit",
			sdk.MsgTypeURL(&types.MsgAddRateLimit{}),
			"",
		},
		{
			"success: MsgUpdateRateLimit",
			sdk.MsgTypeURL(&types.MsgUpdateRateLimit{}),
			"",
		},
		{
			"success: MsgRemoveRateLimit",
			sdk.MsgTypeURL(&types.MsgRemoveRateLimit{}),
			"",
		},
		{
			"success: MsgResetRateLimit",
			sdk.MsgTypeURL(&types.MsgResetRateLimit{}),
			"",
		},
		{
			"type not registered on codec",
			"ibc.invalid.MsgTypeURL",
			"unable to resolve type URL",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			encodingCfg := moduletestutil.MakeTestEncodingConfig(ratelimiting.AppModuleBasic{})
			msg, err := encodingCfg.Codec.InterfaceRegistry().Resolve(tc.typeURL)

			if tc.errMsg == "" {
				require.NotNil(t, msg)
				require.NoError(t, err)
			} else {
				require.Nil(t, msg)
				require.ErrorContains(t, err, tc.errMsg)
			}
		})
	}
}
diff --git a/modules/apps/rate-limiting/types/errors.go b/modules/apps/rate-limiting/types/errors.go
new file mode 100644
index 00000000000..bd2f1f1c4f3
--- /dev/null
+++ b/modules/apps/rate-limiting/types/errors.go
package types

import (
	errorsmod "cosmossdk.io/errors"
)

// Rate-limiting module sentinel errors, registered under the ModuleName codespace.
var (
ErrRateLimitAlreadyExists = errorsmod.Register(ModuleName, 1, "ratelimit key duplicated") + ErrRateLimitNotFound = errorsmod.Register(ModuleName, 2, "rate limit not found") + ErrZeroChannelValue = errorsmod.Register(ModuleName, 3, "channel value is zero") + ErrQuotaExceeded = errorsmod.Register(ModuleName, 4, "quota exceeded") + ErrInvalidClientState = errorsmod.Register(ModuleName, 5, "unable to determine client state from channelId") + ErrChannelNotFound = errorsmod.Register(ModuleName, 6, "channel does not exist") + ErrDenomIsBlacklisted = errorsmod.Register(ModuleName, 7, "denom is blacklisted") + ErrUnsupportedAttribute = errorsmod.Register(ModuleName, 8, "unsupported attribute") + ErrEpochNotFound = errorsmod.Register(ModuleName, 9, "hour epoch not found in store") + ErrUnmarshalEpoch = errorsmod.Register(ModuleName, 10, "could not unmarshal epochBz") + ErrInvalidEpoce = errorsmod.Register(ModuleName, 11, "invalid hour epoch") +) diff --git a/modules/apps/rate-limiting/types/events.go b/modules/apps/rate-limiting/types/events.go new file mode 100644 index 00000000000..1bf6f59583e --- /dev/null +++ b/modules/apps/rate-limiting/types/events.go @@ -0,0 +1,16 @@ +package types + +var ( + EventTransferDenied = "transfer_denied" + + EventRateLimitExceeded = "rate_limit_exceeded" + EventBlacklistedDenom = "blacklisted_denom" + + AttributeKeyReason = "reason" + AttributeKeyModule = "module" + AttributeKeyAction = "action" + AttributeKeyDenom = "denom" + AttributeKeyChannelOrClient = "channel_or_client" + AttributeKeyAmount = "amount" + AttributeKeyError = "error" +) diff --git a/modules/apps/rate-limiting/types/expected_keepers.go b/modules/apps/rate-limiting/types/expected_keepers.go new file mode 100644 index 00000000000..f99f7019c4e --- /dev/null +++ b/modules/apps/rate-limiting/types/expected_keepers.go @@ -0,0 +1,28 @@ +package types + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + + channeltypes 
"github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +// BankKeeper defines the expected bank keeper +type BankKeeper interface { + GetSupply(ctx context.Context, denom string) sdk.Coin +} + +// ChannelKeeper defines the expected IBC channel keeper +type ChannelKeeper interface { + GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) + GetChannelClientState(ctx sdk.Context, portID, channelID string) (clientID string, clientState exported.ClientState, err error) + GetNextSequenceSend(ctx sdk.Context, sourcePort, sourceChannel string) (uint64, bool) +} + +// ClientKeeper defines the expected IBC client keeper +type ClientKeeper interface { + GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) + GetClientStatus(ctx sdk.Context, clientID string) exported.Status +} diff --git a/modules/apps/rate-limiting/types/flow.go b/modules/apps/rate-limiting/types/flow.go new file mode 100644 index 00000000000..916eed11d6b --- /dev/null +++ b/modules/apps/rate-limiting/types/flow.go @@ -0,0 +1,43 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" +) + +// Initializes a new flow from the channel value +func NewFlow(channelValue sdkmath.Int) Flow { + flow := Flow{ + ChannelValue: channelValue, + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + } + + return flow +} + +// Adds an amount to the rate limit's flow after an incoming packet was received +// Returns an error if the new inflow will cause the rate limit to exceed its quota +func (f *Flow) AddInflow(amount sdkmath.Int, quota Quota) error { + netInflow := f.Inflow.Sub(f.Outflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_RECV, netInflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, "Inflow exceeds quota - Net Inflow: %v, Channel Value: %v, Threshold: %v%%", netInflow, f.ChannelValue, quota.MaxPercentRecv) + } + + 
f.Inflow = f.Inflow.Add(amount) + return nil +} + +// Adds an amount to the rate limit's flow after a packet was sent +// Returns an error if the new outflow will cause the rate limit to exceed its quota +func (f *Flow) AddOutflow(amount sdkmath.Int, quota Quota) error { + netOutflow := f.Outflow.Sub(f.Inflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_SEND, netOutflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, "Outflow exceeds quota - Net Outflow: %v, Channel Value: %v, Threshold: %v%%", netOutflow, f.ChannelValue, quota.MaxPercentSend) + } + + f.Outflow = f.Outflow.Add(amount) + return nil +} diff --git a/modules/apps/rate-limiting/types/genesis.go b/modules/apps/rate-limiting/types/genesis.go new file mode 100644 index 00000000000..3925ec75003 --- /dev/null +++ b/modules/apps/rate-limiting/types/genesis.go @@ -0,0 +1,70 @@ +package types + +import ( + "errors" + "fmt" + "strconv" + "strings" + time "time" + + errorsmod "cosmossdk.io/errors" +) + +// Splits a pending send packet of the form {channelId}/{sequenceNumber} into the channel Id +// and sequence number respectively +func ParsePendingPacketID(pendingPacketID string) (channelID string, sequence uint64, err error) { + splits := strings.Split(pendingPacketID, "/") + if len(splits) != 2 { + return "", 0, fmt.Errorf("invalid pending send packet (%s), must be of form: {channelId}/{sequenceNumber}", pendingPacketID) + } + channelID = splits[0] + sequenceString := splits[1] + + sequence, err = strconv.ParseUint(sequenceString, 10, 64) + if err != nil { + return "", 0, errorsmod.Wrapf(err, "unable to parse sequence number (%s) from pending send packet, %s", sequenceString, err) + } + + return channelID, sequence, nil +} + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + RateLimits: []RateLimit{}, + WhitelistedAddressPairs: []WhitelistedAddressPair{}, + BlacklistedDenoms: make([]string, 0), + 
PendingSendPacketSequenceNumbers: make([]string, 0), + HourEpoch: HourEpoch{ + EpochNumber: 0, + Duration: time.Hour, + }, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + for _, pendingPacketID := range gs.PendingSendPacketSequenceNumbers { + if _, _, err := ParsePendingPacketID(pendingPacketID); err != nil { + return err + } + } + + // Verify the epoch hour duration is specified + if gs.HourEpoch.Duration == 0 { + return errors.New("hour epoch duration must be specified") + } + + // If the hour epoch has been initialized already (epoch number != 0), validate and then use it + if gs.HourEpoch.EpochNumber > 0 { + if gs.HourEpoch.EpochStartTime.Equal(time.Time{}) { + return errors.New("if hour epoch number is non-empty, epoch time must be initialized") + } + if gs.HourEpoch.EpochStartHeight == 0 { + return errors.New("if hour epoch number is non-empty, epoch height must be initialized") + } + } + + return nil +} diff --git a/modules/apps/rate-limiting/types/genesis.pb.go b/modules/apps/rate-limiting/types/genesis.pb.go new file mode 100644 index 00000000000..82f9be449d3 --- /dev/null +++ b/modules/apps/rate-limiting/types/genesis.pb.go @@ -0,0 +1,569 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ratelimit module's genesis state. +type GenesisState struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` + WhitelistedAddressPairs []WhitelistedAddressPair `protobuf:"bytes,2,rep,name=whitelisted_address_pairs,json=whitelistedAddressPairs,proto3" json:"whitelisted_address_pairs"` + BlacklistedDenoms []string `protobuf:"bytes,3,rep,name=blacklisted_denoms,json=blacklistedDenoms,proto3" json:"blacklisted_denoms,omitempty"` + PendingSendPacketSequenceNumbers []string `protobuf:"bytes,4,rep,name=pending_send_packet_sequence_numbers,json=pendingSendPacketSequenceNumbers,proto3" json:"pending_send_packet_sequence_numbers,omitempty"` + HourEpoch HourEpoch `protobuf:"bytes,5,opt,name=hour_epoch,json=hourEpoch,proto3" json:"hour_epoch"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_0f0dbc611075e553, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetRateLimits() []RateLimit { + if m != nil { + return 
m.RateLimits + } + return nil +} + +func (m *GenesisState) GetWhitelistedAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.WhitelistedAddressPairs + } + return nil +} + +func (m *GenesisState) GetBlacklistedDenoms() []string { + if m != nil { + return m.BlacklistedDenoms + } + return nil +} + +func (m *GenesisState) GetPendingSendPacketSequenceNumbers() []string { + if m != nil { + return m.PendingSendPacketSequenceNumbers + } + return nil +} + +func (m *GenesisState) GetHourEpoch() HourEpoch { + if m != nil { + return m.HourEpoch + } + return HourEpoch{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibc.applications.rate_limiting.v1.GenesisState") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/genesis.proto", fileDescriptor_0f0dbc611075e553) +} + +var fileDescriptor_0f0dbc611075e553 = []byte{ + // 400 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x3d, 0x6f, 0xd4, 0x30, + 0x18, 0x4e, 0x48, 0x41, 0xaa, 0xcb, 0x42, 0x84, 0x44, 0xe8, 0x10, 0x02, 0x62, 0xe8, 0xc0, 0xc5, + 0x1c, 0x88, 0x81, 0x81, 0x01, 0x04, 0x82, 0x01, 0x55, 0xe5, 0x32, 0x54, 0x62, 0xb1, 0x1c, 0xfb, + 0x55, 0x62, 0x35, 0xb1, 0x8d, 0x5f, 0xe7, 0x2a, 0xc4, 0x9f, 0xe0, 0xb7, 0xf0, 0x2b, 0x3a, 0x76, + 0x64, 0x42, 0xe8, 0xee, 0x8f, 0xa0, 0x7c, 0xd0, 0x0f, 0xa9, 0xd2, 0xdd, 0x66, 0xbf, 0xcf, 0xd7, + 0x6b, 0xeb, 0x21, 0x54, 0x95, 0x82, 0x72, 0x6b, 0x1b, 0x25, 0xb8, 0x57, 0x46, 0x23, 0x75, 0xdc, + 0x03, 0x6b, 0x54, 0xab, 0xbc, 0xd2, 0x15, 0x5d, 0xce, 0x69, 0x05, 0x1a, 0x50, 0x61, 0x6e, 0x9d, + 0xf1, 0x26, 0x7e, 0xac, 0x4a, 0x91, 0x5f, 0x15, 0xe4, 0xd7, 0x04, 0xf9, 0x72, 0xbe, 0x7f, 0xbf, + 0x32, 0x95, 0x19, 0xd8, 0xb4, 0x3f, 0x8d, 0xc2, 0xfd, 0x57, 0x9b, 0x93, 0xae, 0x3b, 0x0d, 0xb2, + 0x27, 0xbf, 0x22, 0x72, 0xf7, 0xe3, 0xb8, 0x41, 0xe1, 0xb9, 0x87, 0xb8, 0x20, 0x7b, 0x97, 0x3c, + 0x4c, 0xc2, 0x2c, 0x3a, 0xd8, 0x7b, 0xf1, 0x2c, 0xdf, 0xb8, 0x56, 0xbe, 0xe0, 0x1e, 0x3e, 0xf7, + 
0xf7, 0x77, 0x3b, 0x67, 0x7f, 0x1e, 0x05, 0x0b, 0xe2, 0xfe, 0x0f, 0x30, 0xfe, 0x41, 0x1e, 0x9e, + 0xd6, 0xca, 0x43, 0xa3, 0xd0, 0x83, 0x64, 0x5c, 0x4a, 0x07, 0x88, 0xcc, 0x72, 0xe5, 0x30, 0xb9, + 0x35, 0x44, 0xbc, 0xde, 0x22, 0xe2, 0xf8, 0xd2, 0xe3, 0xed, 0x68, 0x71, 0xc4, 0x95, 0x9b, 0xf2, + 0x1e, 0x9c, 0xde, 0x88, 0x62, 0x3c, 0x23, 0x71, 0xd9, 0x70, 0x71, 0x32, 0x85, 0x4b, 0xd0, 0xa6, + 0xc5, 0x24, 0xca, 0xa2, 0x83, 0xdd, 0xc5, 0xbd, 0x2b, 0xc8, 0xfb, 0x01, 0x88, 0x0f, 0xc9, 0x53, + 0x0b, 0x5a, 0x2a, 0x5d, 0x31, 0x04, 0x2d, 0x99, 0xe5, 0xe2, 0x04, 0x3c, 0x43, 0xf8, 0xd6, 0x81, + 0x16, 0xc0, 0x74, 0xd7, 0x96, 0xe0, 0x30, 0xd9, 0x19, 0x0c, 0xb2, 0x89, 0x5b, 0x80, 0x96, 0x47, + 0x03, 0xb3, 0x98, 0x88, 0x87, 0x23, 0x2f, 0xfe, 0x42, 0x48, 0x6d, 0x3a, 0xc7, 0xc0, 0x1a, 0x51, + 0x27, 0xb7, 0xb3, 0x70, 0xcb, 0xff, 0xfc, 0x64, 0x3a, 0xf7, 0xa1, 0xd7, 0x4c, 0xef, 0xdb, 0xad, + 0x2f, 0x06, 0xc7, 0x67, 0xab, 0x34, 0x3c, 0x5f, 0xa5, 0xe1, 0xdf, 0x55, 0x1a, 0xfe, 0x5c, 0xa7, + 0xc1, 0xf9, 0x3a, 0x0d, 0x7e, 0xaf, 0xd3, 0xe0, 0xeb, 0x9b, 0x4a, 0xf9, 0xba, 0x2b, 0x73, 0x61, + 0x5a, 0x2a, 0x0c, 0xb6, 0x06, 0xfb, 0x06, 0xce, 0x2a, 0x43, 0x97, 0xf3, 0xe7, 0xb4, 0x35, 0xb2, + 0x6b, 0x00, 0xfb, 0x9a, 0x8c, 0xf5, 0x98, 0x5d, 0xd4, 0xc3, 0x7f, 0xb7, 0x80, 0xe5, 0x9d, 0xa1, + 0x14, 0x2f, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x74, 0x8b, 0x54, 0x89, 0xb7, 0x02, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.HourEpoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for iNdEx := len(m.PendingSendPacketSequenceNumbers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PendingSendPacketSequenceNumbers[iNdEx]) + copy(dAtA[i:], m.PendingSendPacketSequenceNumbers[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PendingSendPacketSequenceNumbers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.BlacklistedDenoms) > 0 { + for iNdEx := len(m.BlacklistedDenoms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlacklistedDenoms[iNdEx]) + copy(dAtA[i:], m.BlacklistedDenoms[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.BlacklistedDenoms[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for iNdEx := len(m.WhitelistedAddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.WhitelistedAddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for _, e := range m.WhitelistedAddressPairs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.BlacklistedDenoms) > 0 { + 
for _, s := range m.BlacklistedDenoms { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for _, s := range m.PendingSendPacketSequenceNumbers { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.HourEpoch.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field WhitelistedAddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhitelistedAddressPairs = append(m.WhitelistedAddressPairs, WhitelistedAddressPair{}) + if err := m.WhitelistedAddressPairs[len(m.WhitelistedAddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedDenoms", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlacklistedDenoms = append(m.BlacklistedDenoms, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingSendPacketSequenceNumbers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingSendPacketSequenceNumbers = append(m.PendingSendPacketSequenceNumbers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HourEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.HourEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 
2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/types/genesis_test.go b/modules/apps/rate-limiting/types/genesis_test.go new file mode 100644 index 00000000000..628cb2962bb --- /dev/null +++ b/modules/apps/rate-limiting/types/genesis_test.go @@ -0,0 +1,97 @@ +package types_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestValidateGenesis(t *testing.T) { + currentHour := 13 + blockTime := time.Date(2024, 1, 1, currentHour, 55, 8, 0, time.UTC) // 13:55:08 + + testCases := []struct { + name string + genesisState types.GenesisState + expectedError string + }{ + { + name: "valid default state", + genesisState: *types.DefaultGenesis(), + }, + { + name: "valid custom state", + genesisState: types.GenesisState{ + WhitelistedAddressPairs: []types.WhitelistedAddressPair{ + {Sender: "senderA", Receiver: "receiverA"}, + {Sender: "senderB", Receiver: "receiverB"}, + }, + BlacklistedDenoms: []string{"denomA", "denomB"}, + PendingSendPacketSequenceNumbers: 
[]string{"channel-0/1", "channel-2/3"}, + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartTime: blockTime, + Duration: time.Minute, + EpochStartHeight: 1, + }, + }, + }, + { + name: "invalid packet sequence - wrong delimiter", + genesisState: types.GenesisState{ + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2|3"}, + }, + expectedError: "invalid pending send packet (channel-2|3), must be of form: {channelId}/{sequenceNumber}", + }, + { + name: "invalid packet sequence - invalid sequence", + genesisState: types.GenesisState{ + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2/X"}, + }, + expectedError: "unable to parse sequence number (X) from pending send packet", + }, + { + name: "invalid hour epoch - no duration", + genesisState: types.GenesisState{ + HourEpoch: types.HourEpoch{}, + }, + expectedError: "hour epoch duration must be specified", + }, + { + name: "invalid hour epoch - no epoch time", + genesisState: types.GenesisState{ + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartHeight: 1, + Duration: time.Minute, + }, + }, + expectedError: "if hour epoch number is non-empty, epoch time must be initialized", + }, + { + name: "invalid hour epoch - no epoch height", + genesisState: types.GenesisState{ + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartTime: blockTime, + Duration: time.Minute, + }, + }, + expectedError: "if hour epoch number is non-empty, epoch height must be initialized", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.genesisState.Validate() + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/modules/apps/rate-limiting/types/keys.go b/modules/apps/rate-limiting/types/keys.go new file mode 100644 index 00000000000..1ba017419bd --- /dev/null +++ b/modules/apps/rate-limiting/types/keys.go @@ -0,0 +1,57 @@ +package types + +import ( + 
	"encoding/binary"
)

const (
	// ModuleName defines the IBC rate-limiting name
	ModuleName = "ratelimiting"

	// StoreKey is the store key string for IBC rate-limiting
	StoreKey = ModuleName

	// RouterKey is the message route for IBC rate-limiting
	RouterKey = ModuleName

	// QuerierRoute is the querier route for IBC rate-limiting
	QuerierRoute = ModuleName
)

// bytes converts a prefix/key string to its raw byte representation.
func bytes(p string) []byte {
	return []byte(p)
}

var (
	// Store-key prefixes for the module's sub-stores.
	RateLimitKeyPrefix      = bytes("rate-limit")
	PendingSendPacketPrefix = bytes("pending-send-packet")
	DenomBlacklistKeyPrefix = bytes("denom-blacklist")
	// TODO: Fix IBCGO-2368
	// NOTE(review): the whitelist prefix is deliberately still the string
	// "address-blacklist" (see the linked issue) — renaming it here without a
	// store migration would orphan existing on-chain state.
	AddressWhitelistKeyPrefix = bytes("address-blacklist")
	HourEpochKey              = bytes("hour-epoch")

	// Fixed byte length that channel IDs are padded (or truncated) to inside
	// PendingSendPacketKey, so both components can be parsed back out of a key.
	PendingSendPacketChannelLength = 16
)

// RateLimitItemKey returns the rate limit store key built from the denom and
// channel ID.
// NOTE(review): the two parts are concatenated with no separator, so distinct
// (denom, channelID) pairs could in principle produce the same key — confirm
// that valid denoms can never end in a "channel-{N}"/client-ID suffix.
func RateLimitItemKey(denom string, channelID string) []byte {
	return append(bytes(denom), bytes(channelID)...)
}

// PendingSendPacketKey returns the pending-send-packet store key from the
// channel ID and sequence number.
// The channel ID must be fixed length to allow for extracting the underlying
// values from a key.
// NOTE(review): copy silently truncates a channel ID longer than
// PendingSendPacketChannelLength (16) bytes — verify IDs cannot exceed that.
func PendingSendPacketKey(channelID string, sequenceNumber uint64) []byte {
	// Pad/truncate the channel ID to the fixed width.
	channelIDBz := make([]byte, PendingSendPacketChannelLength)
	copy(channelIDBz, channelID)

	// Sequence number occupies the trailing 8 bytes, big-endian, so keys sort
	// by sequence within a channel.
	sequenceNumberBz := make([]byte, 8)
	binary.BigEndian.PutUint64(sequenceNumberBz, sequenceNumber)

	return append(channelIDBz, sequenceNumberBz...)
}

// AddressWhitelistKey returns the whitelist store key for a sender/receiver
// address pair.
// NOTE(review): sender and receiver are concatenated with no separator; as
// with RateLimitItemKey, confirm ambiguity is impossible for valid addresses.
func AddressWhitelistKey(sender, receiver string) []byte {
	return append(bytes(sender), bytes(receiver)...)
+} diff --git a/modules/apps/rate-limiting/types/msgs.go b/modules/apps/rate-limiting/types/msgs.go new file mode 100644 index 00000000000..f4f5977656d --- /dev/null +++ b/modules/apps/rate-limiting/types/msgs.go @@ -0,0 +1,196 @@ +package types + +import ( + "regexp" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" +) + +var ( + _ sdk.Msg = &MsgAddRateLimit{} + _ sdk.Msg = &MsgUpdateRateLimit{} + _ sdk.Msg = &MsgRemoveRateLimit{} + _ sdk.Msg = &MsgResetRateLimit{} +) + +// ---------------------------------------------- +// MsgAddRateLimit +// ---------------------------------------------- + +func NewMsgAddRateLimit(denom, channelOrClientID string, maxPercentSend sdkmath.Int, maxPercentRecv sdkmath.Int, durationHours uint64) *MsgAddRateLimit { + return &MsgAddRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +func (msg *MsgAddRateLimit) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address (%s)", err) + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + if msg.MaxPercentSend.GT(sdkmath.NewInt(100)) || 
msg.MaxPercentSend.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(sdkmath.NewInt(100)) || msg.MaxPercentRecv.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +// ---------------------------------------------- +// MsgUpdateRateLimit +// ---------------------------------------------- + +func NewMsgUpdateRateLimit(denom, channelOrClientID string, maxPercentSend sdkmath.Int, maxPercentRecv sdkmath.Int, durationHours uint64) *MsgUpdateRateLimit { + return &MsgUpdateRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +func (msg *MsgUpdateRateLimit) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address (%s)", err) + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must 
be of the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + if msg.MaxPercentSend.GT(sdkmath.NewInt(100)) || msg.MaxPercentSend.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(sdkmath.NewInt(100)) || msg.MaxPercentRecv.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +// ---------------------------------------------- +// MsgRemoveRateLimit +// ---------------------------------------------- + +func NewMsgRemoveRateLimit(denom, channelOrClientID string) *MsgRemoveRateLimit { + return &MsgRemoveRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + } +} + +func (msg *MsgRemoveRateLimit) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address (%s)", err) + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of the format 'channel-{N}' or a valid 
client-id", msg.ChannelOrClientId) + } + + return nil +} + +// ---------------------------------------------- +// MsgResetRateLimit +// ---------------------------------------------- + +func NewMsgResetRateLimit(denom, channelOrClientID string) *MsgResetRateLimit { + return &MsgResetRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + } +} + +func (msg *MsgResetRateLimit) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address (%s)", err) + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + return nil +} diff --git a/modules/apps/rate-limiting/types/msgs_test.go b/modules/apps/rate-limiting/types/msgs_test.go new file mode 100644 index 00000000000..1bf1c194cfa --- /dev/null +++ b/modules/apps/rate-limiting/types/msgs_test.go @@ -0,0 +1,169 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +type MsgsTestSuite struct { + suite.Suite + + authority string + randomAddress string + validChannelID string + validClientID string +} + +func (s *MsgsTestSuite) SetupTest() { + s.authority = "cosmos10h9stc5v6ntgeygf5xf945njqq5h32r53uquvw" + s.randomAddress = "cosmos10h9stc5v6ntgeygf5xf945njqq5h32r53uquvw" + s.validChannelID = "channel-0" + s.validClientID = "07-tendermint-0" +} + +func 
TestMsgsTestSuite(t *testing.T) { + suite.Run(t, new(MsgsTestSuite)) +} + +func (s *MsgsTestSuite) TestMsgAddRateLimit() { + testCases := []struct { + name string + msg *types.MsgAddRateLimit + expPass bool + }{ + { + name: "valid add msg with channel id", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, + }, + { + name: "valid add msg with client id", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validClientID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, + }, + { + name: "invalid authority", + msg: &types.MsgAddRateLimit{ + Signer: "invalid", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "denom can't be empty", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "invalid client ID", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "invalid-client-id", + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "invalid channel ID", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "channel", + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "max percent send > 100", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + 
MaxPercentSend: sdkmath.NewInt(101), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "max percent recv > 100", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(101), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "send and recv both zero", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.ZeroInt(), + MaxPercentRecv: sdkmath.ZeroInt(), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "duration is zero hours", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 0, + }, + expPass: false, + }, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} diff --git a/modules/apps/rate-limiting/types/query.pb.go b/modules/apps/rate-limiting/types/query.pb.go new file mode 100644 index 00000000000..8d91405093a --- /dev/null +++ b/modules/apps/rate-limiting/types/query.pb.go @@ -0,0 +1,2482 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: ibc/applications/rate_limiting/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Queries all rate limits +type QueryAllRateLimitsRequest struct { +} + +func (m *QueryAllRateLimitsRequest) Reset() { *m = QueryAllRateLimitsRequest{} } +func (m *QueryAllRateLimitsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsRequest) ProtoMessage() {} +func (*QueryAllRateLimitsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{0} +} +func (m *QueryAllRateLimitsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsRequest.Merge(m, src) +} +func (m *QueryAllRateLimitsRequest) XXX_Size() int { + return m.Size() +} +func 
(m *QueryAllRateLimitsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsRequest proto.InternalMessageInfo + +// QueryAllRateLimitsResponse returns all the rate limits stored on the chain. +type QueryAllRateLimitsResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryAllRateLimitsResponse) Reset() { *m = QueryAllRateLimitsResponse{} } +func (m *QueryAllRateLimitsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsResponse) ProtoMessage() {} +func (*QueryAllRateLimitsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{1} +} +func (m *QueryAllRateLimitsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsResponse.Merge(m, src) +} +func (m *QueryAllRateLimitsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsResponse proto.InternalMessageInfo + +func (m *QueryAllRateLimitsResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +// Queries a specific rate limit by channel ID and denom +type QueryRateLimitRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelOrClientId string 
`protobuf:"bytes,2,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *QueryRateLimitRequest) Reset() { *m = QueryRateLimitRequest{} } +func (m *QueryRateLimitRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitRequest) ProtoMessage() {} +func (*QueryRateLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{2} +} +func (m *QueryRateLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitRequest.Merge(m, src) +} +func (m *QueryRateLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitRequest proto.InternalMessageInfo + +func (m *QueryRateLimitRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryRateLimitRequest) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// QueryRateLimitResponse returns a rate limit by denom and channel_or_client_id combination. 
+type QueryRateLimitResponse struct { + RateLimit *RateLimit `protobuf:"bytes,1,opt,name=rate_limit,json=rateLimit,proto3" json:"rate_limit,omitempty"` +} + +func (m *QueryRateLimitResponse) Reset() { *m = QueryRateLimitResponse{} } +func (m *QueryRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitResponse) ProtoMessage() {} +func (*QueryRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{3} +} +func (m *QueryRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitResponse.Merge(m, src) +} +func (m *QueryRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitResponse proto.InternalMessageInfo + +func (m *QueryRateLimitResponse) GetRateLimit() *RateLimit { + if m != nil { + return m.RateLimit + } + return nil +} + +// Queries all the rate limits for a given chain +type QueryRateLimitsByChainIDRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryRateLimitsByChainIDRequest) Reset() { *m = QueryRateLimitsByChainIDRequest{} } +func (m *QueryRateLimitsByChainIDRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIDRequest) ProtoMessage() {} +func (*QueryRateLimitsByChainIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{4} +} 
+func (m *QueryRateLimitsByChainIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIDRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChainIDRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIDRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIDRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// QueryRateLimitsByChainIDResponse returns all rate-limits by a chain. 
+type QueryRateLimitsByChainIDResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChainIDResponse) Reset() { *m = QueryRateLimitsByChainIDResponse{} } +func (m *QueryRateLimitsByChainIDResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIDResponse) ProtoMessage() {} +func (*QueryRateLimitsByChainIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{5} +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIDResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIDResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIDResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIDResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +// Queries all the rate limits for a given channel or client ID +type QueryRateLimitsByChannelOrClientIDRequest struct { + ChannelOrClientId string `protobuf:"bytes,1,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) Reset() { + *m = QueryRateLimitsByChannelOrClientIDRequest{} +} +func (m 
*QueryRateLimitsByChannelOrClientIDRequest) String() string { + return proto.CompactTextString(m) +} +func (*QueryRateLimitsByChannelOrClientIDRequest) ProtoMessage() {} +func (*QueryRateLimitsByChannelOrClientIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{6} +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelOrClientIDRequest) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// QueryRateLimitsByChannelOrClientIDResponse returns all rate-limits by a channel or client id. 
+type QueryRateLimitsByChannelOrClientIDResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) Reset() { + *m = QueryRateLimitsByChannelOrClientIDResponse{} +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) String() string { + return proto.CompactTextString(m) +} +func (*QueryRateLimitsByChannelOrClientIDResponse) ProtoMessage() {} +func (*QueryRateLimitsByChannelOrClientIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{7} +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelOrClientIDResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +// Queries all blacklisted denoms +type QueryAllBlacklistedDenomsRequest struct { +} + +func (m *QueryAllBlacklistedDenomsRequest) Reset() { *m = QueryAllBlacklistedDenomsRequest{} } +func (m *QueryAllBlacklistedDenomsRequest) String() string { return 
proto.CompactTextString(m) } +func (*QueryAllBlacklistedDenomsRequest) ProtoMessage() {} +func (*QueryAllBlacklistedDenomsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{8} +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllBlacklistedDenomsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllBlacklistedDenomsRequest.Merge(m, src) +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllBlacklistedDenomsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllBlacklistedDenomsRequest proto.InternalMessageInfo + +// QueryAllBlacklistedDenomsResponse returns all the blacklisted denosm. 
type QueryAllBlacklistedDenomsResponse struct {
	// Denoms lists the blacklisted denoms (proto field 1, repeated string).
	Denoms []string `protobuf:"bytes,1,rep,name=denoms,proto3" json:"denoms,omitempty"`
}

func (m *QueryAllBlacklistedDenomsResponse) Reset()         { *m = QueryAllBlacklistedDenomsResponse{} }
func (m *QueryAllBlacklistedDenomsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryAllBlacklistedDenomsResponse) ProtoMessage()    {}
func (*QueryAllBlacklistedDenomsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_f55a91bf266ae0f7, []int{9}
}

// The XXX_* methods below are the standard gogoproto message plumbing
// (wire (un)marshaling, merging, sizing) emitted for every message type.
func (m *QueryAllBlacklistedDenomsResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *QueryAllBlacklistedDenomsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_QueryAllBlacklistedDenomsResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *QueryAllBlacklistedDenomsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryAllBlacklistedDenomsResponse.Merge(m, src)
}
func (m *QueryAllBlacklistedDenomsResponse) XXX_Size() int {
	return m.Size()
}
func (m *QueryAllBlacklistedDenomsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryAllBlacklistedDenomsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_QueryAllBlacklistedDenomsResponse proto.InternalMessageInfo

// GetDenoms returns the Denoms field, or nil for a nil receiver.
func (m *QueryAllBlacklistedDenomsResponse) GetDenoms() []string {
	if m != nil {
		return m.Denoms
	}
	return nil
}

// Queries all whitelisted address pairs
type QueryAllWhitelistedAddressesRequest struct {
}

func (m *QueryAllWhitelistedAddressesRequest) Reset()         { *m = QueryAllWhitelistedAddressesRequest{} }
func (m *QueryAllWhitelistedAddressesRequest) String() string { return proto.CompactTextString(m) }
func (*QueryAllWhitelistedAddressesRequest) ProtoMessage()    {}
func (*QueryAllWhitelistedAddressesRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f55a91bf266ae0f7, []int{10}
}
func (m *QueryAllWhitelistedAddressesRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *QueryAllWhitelistedAddressesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *QueryAllWhitelistedAddressesRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Merge(m, src)
}
func (m *QueryAllWhitelistedAddressesRequest) XXX_Size() int {
	return m.Size()
}
func (m *QueryAllWhitelistedAddressesRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_QueryAllWhitelistedAddressesRequest.DiscardUnknown(m)
}

var xxx_messageInfo_QueryAllWhitelistedAddressesRequest proto.InternalMessageInfo

// QueryAllWhitelistedAddressesResponse returns all whitelisted pairs.
+type QueryAllWhitelistedAddressesResponse struct { + AddressPairs []WhitelistedAddressPair `protobuf:"bytes,1,rep,name=address_pairs,json=addressPairs,proto3" json:"address_pairs"` +} + +func (m *QueryAllWhitelistedAddressesResponse) Reset() { *m = QueryAllWhitelistedAddressesResponse{} } +func (m *QueryAllWhitelistedAddressesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesResponse) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{11} +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllWhitelistedAddressesResponse proto.InternalMessageInfo + +func (m *QueryAllWhitelistedAddressesResponse) GetAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.AddressPairs + } + return nil +} + +func init() { + proto.RegisterType((*QueryAllRateLimitsRequest)(nil), "ibc.applications.rate_limiting.v1.QueryAllRateLimitsRequest") + proto.RegisterType((*QueryAllRateLimitsResponse)(nil), "ibc.applications.rate_limiting.v1.QueryAllRateLimitsResponse") + proto.RegisterType((*QueryRateLimitRequest)(nil), 
"ibc.applications.rate_limiting.v1.QueryRateLimitRequest") + proto.RegisterType((*QueryRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitResponse") + proto.RegisterType((*QueryRateLimitsByChainIDRequest)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChainIDRequest") + proto.RegisterType((*QueryRateLimitsByChainIDResponse)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChainIDResponse") + proto.RegisterType((*QueryRateLimitsByChannelOrClientIDRequest)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChannelOrClientIDRequest") + proto.RegisterType((*QueryRateLimitsByChannelOrClientIDResponse)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChannelOrClientIDResponse") + proto.RegisterType((*QueryAllBlacklistedDenomsRequest)(nil), "ibc.applications.rate_limiting.v1.QueryAllBlacklistedDenomsRequest") + proto.RegisterType((*QueryAllBlacklistedDenomsResponse)(nil), "ibc.applications.rate_limiting.v1.QueryAllBlacklistedDenomsResponse") + proto.RegisterType((*QueryAllWhitelistedAddressesRequest)(nil), "ibc.applications.rate_limiting.v1.QueryAllWhitelistedAddressesRequest") + proto.RegisterType((*QueryAllWhitelistedAddressesResponse)(nil), "ibc.applications.rate_limiting.v1.QueryAllWhitelistedAddressesResponse") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/query.proto", fileDescriptor_f55a91bf266ae0f7) +} + +var fileDescriptor_f55a91bf266ae0f7 = []byte{ + // 742 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x4f, 0x13, 0x4d, + 0x18, 0xee, 0xf2, 0x7d, 0xf0, 0x7d, 0x7d, 0x91, 0x83, 0x63, 0x45, 0x58, 0xb5, 0xc0, 0x2a, 0x11, + 0x8d, 0xed, 0x0a, 0xc6, 0x28, 0x0a, 0x31, 0xb4, 0x04, 0x25, 0x22, 0x62, 0x4d, 0x24, 0x31, 0xc6, + 0x66, 0xba, 0x3b, 0x69, 0x27, 0x6e, 0x77, 0x96, 0x9d, 0x2d, 0xa4, 0x31, 0x1c, 0xf4, 0xcc, 0xc1, + 0xc4, 0x9f, 0xe3, 0x1f, 0xe0, 0x48, 0xe2, 0x85, 0x93, 0x31, 0xc5, 0x93, 0x07, 0x7f, 0x83, 
0xd9, + 0xd9, 0xe9, 0x96, 0xc2, 0xb6, 0xb4, 0x45, 0x6e, 0xdd, 0x9d, 0xf7, 0x7d, 0xde, 0xe7, 0x79, 0xf6, + 0x9d, 0x27, 0x85, 0x14, 0x2d, 0x18, 0x3a, 0x76, 0x1c, 0x8b, 0x1a, 0xd8, 0xa3, 0xcc, 0xe6, 0xba, + 0x8b, 0x3d, 0x92, 0xb7, 0x68, 0x99, 0x7a, 0xd4, 0x2e, 0xea, 0x9b, 0xd3, 0xfa, 0x46, 0x85, 0xb8, + 0xd5, 0xb4, 0xe3, 0x32, 0x8f, 0xa1, 0x09, 0x5a, 0x30, 0xd2, 0x87, 0xcb, 0xd3, 0x4d, 0xe5, 0xe9, + 0xcd, 0x69, 0x35, 0x51, 0x64, 0x45, 0x26, 0xaa, 0x75, 0xff, 0x57, 0xd0, 0xa8, 0x5e, 0x29, 0x32, + 0x56, 0xb4, 0x88, 0x8e, 0x1d, 0xaa, 0x63, 0xdb, 0x66, 0x9e, 0x6c, 0x0f, 0x4e, 0xef, 0x9d, 0xcc, + 0xa2, 0x79, 0x8e, 0x68, 0xd3, 0x2e, 0xc3, 0xe8, 0x4b, 0x9f, 0xdc, 0x82, 0x65, 0xe5, 0xb0, 0x47, + 0x56, 0xfc, 0x53, 0x9e, 0x23, 0x1b, 0x15, 0xc2, 0x3d, 0x6d, 0x03, 0xd4, 0xa8, 0x43, 0xee, 0x30, + 0x9b, 0x13, 0xf4, 0x0a, 0x06, 0x1b, 0x88, 0x7c, 0x44, 0x19, 0xff, 0x67, 0x6a, 0x70, 0xe6, 0x76, + 0xfa, 0x44, 0x79, 0xe9, 0x10, 0x2b, 0xf3, 0xef, 0xee, 0xf7, 0xb1, 0x58, 0x0e, 0xdc, 0x10, 0x5c, + 0x7b, 0x07, 0x17, 0xc5, 0xc8, 0xb0, 0x46, 0x72, 0x41, 0x09, 0xe8, 0x37, 0x89, 0xcd, 0xca, 0x23, + 0xca, 0xb8, 0x32, 0x15, 0xcf, 0x05, 0x0f, 0x48, 0x87, 0x84, 0x51, 0xc2, 0xb6, 0x4d, 0xac, 0x3c, + 0x73, 0xf3, 0x86, 0x45, 0x89, 0xed, 0xe5, 0xa9, 0x39, 0xd2, 0x27, 0x8a, 0xce, 0xcb, 0xb3, 0x17, + 0x6e, 0x56, 0x9c, 0x2c, 0x9b, 0x1a, 0x81, 0xe1, 0xa3, 0xf8, 0x52, 0xce, 0x33, 0x80, 0x06, 0x53, + 0x31, 0xa5, 0x4b, 0x35, 0xb9, 0x78, 0xa8, 0x43, 0x9b, 0x83, 0xb1, 0xe6, 0x31, 0x3c, 0x53, 0xcd, + 0x96, 0x30, 0xb5, 0x97, 0x17, 0xeb, 0x82, 0x46, 0xe1, 0x7f, 0xc3, 0x7f, 0xe3, 0xd3, 0x0d, 0x34, + 0xfd, 0x27, 0x9e, 0x97, 0x4d, 0x6d, 0x0b, 0xc6, 0x5b, 0x77, 0x9f, 0xa5, 0xfb, 0x6f, 0xe1, 0x66, + 0xd4, 0xe0, 0x26, 0x0f, 0x43, 0x01, 0xad, 0xbc, 0x57, 0x5a, 0x79, 0xff, 0x51, 0x81, 0x5b, 0x9d, + 0xc0, 0x9f, 0xa5, 0x42, 0x4d, 0x5a, 0xbb, 0x60, 0x59, 0x19, 0x0b, 0x1b, 0xef, 0x2d, 0xca, 0x3d, + 0x62, 0x2e, 0xfa, 0xcb, 0x14, 0xae, 0xfd, 0x23, 0x98, 0x68, 0x53, 0x23, 0xd9, 0x0d, 0xc3, 0x80, + 0x58, 0xc1, 0x80, 0x58, 0x3c, 
0x27, 0x9f, 0xb4, 0x49, 0xb8, 0x56, 0x6f, 0x5e, 0x2f, 0x51, 0x8f, + 0x04, 0xcd, 0x0b, 0xa6, 0xe9, 0x12, 0xce, 0x49, 0x38, 0x63, 0x47, 0x81, 0xeb, 0xed, 0xeb, 0xe4, + 0x1c, 0x13, 0x86, 0x70, 0xf0, 0x32, 0xef, 0x60, 0xea, 0xd6, 0x7d, 0x98, 0xed, 0xc0, 0x87, 0xe3, + 0xb8, 0x6b, 0x98, 0xba, 0xd2, 0x94, 0x73, 0xb8, 0xf1, 0x8a, 0xcf, 0xfc, 0x02, 0xe8, 0x17, 0x74, + 0xd0, 0x57, 0x05, 0x86, 0x9a, 0xee, 0x3b, 0x9a, 0xeb, 0x60, 0x54, 0xcb, 0x0c, 0x51, 0xe7, 0x7b, + 0xec, 0x0e, 0xe4, 0x6b, 0xa9, 0x4f, 0xdf, 0x7e, 0x7e, 0xe9, 0xbb, 0x81, 0x26, 0x75, 0x99, 0x6f, + 0x41, 0xae, 0xa5, 0x8e, 0xe6, 0x5a, 0xb0, 0x24, 0x68, 0x5f, 0x81, 0x78, 0x88, 0x82, 0x1e, 0x74, + 0x3a, 0xfb, 0x68, 0xda, 0xa8, 0xb3, 0x3d, 0x74, 0x4a, 0xc6, 0xaf, 0x05, 0xe3, 0x35, 0xb4, 0xda, + 0x09, 0xe3, 0x43, 0xbf, 0x3e, 0x44, 0x5d, 0xa6, 0x6d, 0xbd, 0x50, 0xcd, 0x07, 0x51, 0x57, 0x53, + 0xe0, 0x42, 0x44, 0x20, 0xa0, 0x4c, 0xd7, 0x54, 0x8f, 0x65, 0x91, 0x9a, 0x3d, 0x15, 0x86, 0x14, + 0x9e, 0x11, 0xc2, 0xe7, 0xd0, 0xc3, 0xee, 0x84, 0x73, 0xa1, 0x5c, 0xe4, 0xe0, 0x36, 0xda, 0xe9, + 0x83, 0xab, 0x6d, 0xd3, 0x01, 0xad, 0xf4, 0x48, 0x35, 0x32, 0xc3, 0xd4, 0xe7, 0x7f, 0x09, 0x4d, + 0x5a, 0xb0, 0x2a, 0x2c, 0x78, 0x8a, 0x96, 0x7a, 0xb1, 0xe0, 0xf8, 0xc7, 0xf7, 0xbf, 0x79, 0x22, + 0x2a, 0x85, 0x50, 0xb6, 0x8b, 0x5b, 0xd5, 0x2a, 0xe7, 0xd4, 0xc5, 0xd3, 0x81, 0x48, 0xcd, 0x8f, + 0x85, 0xe6, 0x59, 0x74, 0xbf, 0x23, 0xcd, 0x85, 0x06, 0x4e, 0xb0, 0xd7, 0x1c, 0xfd, 0x56, 0xe0, + 0x52, 0x8b, 0x14, 0x44, 0x4b, 0x5d, 0x50, 0x6c, 0x13, 0xb7, 0xea, 0x93, 0x53, 0xe3, 0xf4, 0xb4, + 0xe4, 0x5b, 0x0d, 0xa8, 0x3c, 0xae, 0x63, 0x65, 0xd6, 0x77, 0x6b, 0x49, 0x65, 0xaf, 0x96, 0x54, + 0x7e, 0xd4, 0x92, 0xca, 0xe7, 0x83, 0x64, 0x6c, 0xef, 0x20, 0x19, 0xdb, 0x3f, 0x48, 0xc6, 0xde, + 0xcc, 0x17, 0xa9, 0x57, 0xaa, 0x14, 0xd2, 0x06, 0x2b, 0xeb, 0x06, 0xe3, 0x65, 0xc6, 0xfd, 0x31, + 0xa9, 0x22, 0xd3, 0x37, 0xa7, 0xef, 0xe8, 0x65, 0x66, 0x56, 0x2c, 0xc2, 0xa3, 0xa6, 0x7a, 0x55, + 0x87, 0xf0, 0xc2, 0x80, 0xf8, 0x4f, 0x77, 0xf7, 0x4f, 0x00, 0x00, 
0x00, 0xff, 0xff, 0x19, 0xfa, + 0xfe, 0xd6, 0x92, 0x0a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Queries all rate limits + AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) + // Queries a specific rate limit by channel ID and denom + // Ex: + // - /ratelimit/{channel_or_client_id}/by_denom?denom={denom} + RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) + // Queries all the rate limits for a given chain + RateLimitsByChainID(ctx context.Context, in *QueryRateLimitsByChainIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIDResponse, error) + // Queries all the rate limits for a given channel ID + RateLimitsByChannelOrClientID(ctx context.Context, in *QueryRateLimitsByChannelOrClientIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelOrClientIDResponse, error) + // Queries all blacklisted denoms + AllBlacklistedDenoms(ctx context.Context, in *QueryAllBlacklistedDenomsRequest, opts ...grpc.CallOption) (*QueryAllBlacklistedDenomsResponse, error) + // Queries all whitelisted address pairs + AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c 
*queryClient) AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) { + out := new(QueryAllRateLimitsResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/AllRateLimits", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) { + out := new(QueryRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/RateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChainID(ctx context.Context, in *QueryRateLimitsByChainIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIDResponse, error) { + out := new(QueryRateLimitsByChainIDResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChainID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChannelOrClientID(ctx context.Context, in *QueryRateLimitsByChannelOrClientIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelOrClientIDResponse, error) { + out := new(QueryRateLimitsByChannelOrClientIDResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChannelOrClientID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllBlacklistedDenoms(ctx context.Context, in *QueryAllBlacklistedDenomsRequest, opts ...grpc.CallOption) (*QueryAllBlacklistedDenomsResponse, error) { + out := new(QueryAllBlacklistedDenomsResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/AllBlacklistedDenoms", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) { + out := new(QueryAllWhitelistedAddressesResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/AllWhitelistedAddresses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Queries all rate limits + AllRateLimits(context.Context, *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) + // Queries a specific rate limit by channel ID and denom + // Ex: + // - /ratelimit/{channel_or_client_id}/by_denom?denom={denom} + RateLimit(context.Context, *QueryRateLimitRequest) (*QueryRateLimitResponse, error) + // Queries all the rate limits for a given chain + RateLimitsByChainID(context.Context, *QueryRateLimitsByChainIDRequest) (*QueryRateLimitsByChainIDResponse, error) + // Queries all the rate limits for a given channel ID + RateLimitsByChannelOrClientID(context.Context, *QueryRateLimitsByChannelOrClientIDRequest) (*QueryRateLimitsByChannelOrClientIDResponse, error) + // Queries all blacklisted denoms + AllBlacklistedDenoms(context.Context, *QueryAllBlacklistedDenomsRequest) (*QueryAllBlacklistedDenomsResponse, error) + // Queries all whitelisted address pairs + AllWhitelistedAddresses(context.Context, *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllRateLimits(ctx context.Context, req *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllRateLimits not implemented") +} +func (*UnimplementedQueryServer) RateLimit(ctx context.Context, req *QueryRateLimitRequest) (*QueryRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimit not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChainID(ctx context.Context, req *QueryRateLimitsByChainIDRequest) (*QueryRateLimitsByChainIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChainID not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChannelOrClientID(ctx context.Context, req *QueryRateLimitsByChannelOrClientIDRequest) (*QueryRateLimitsByChannelOrClientIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChannelOrClientID not implemented") +} +func (*UnimplementedQueryServer) AllBlacklistedDenoms(ctx context.Context, req *QueryAllBlacklistedDenomsRequest) (*QueryAllBlacklistedDenomsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllBlacklistedDenoms not implemented") +} +func (*UnimplementedQueryServer) AllWhitelistedAddresses(ctx context.Context, req *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllWhitelistedAddresses not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllRateLimitsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(QueryServer).AllRateLimits(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/AllRateLimits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllRateLimits(ctx, req.(*QueryAllRateLimitsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/RateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimit(ctx, req.(*QueryRateLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChainID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChainIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChainID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChainID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChainID(ctx, req.(*QueryRateLimitsByChainIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChannelOrClientID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChannelOrClientIDRequest) + if err := 
dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChannelOrClientID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChannelOrClientID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChannelOrClientID(ctx, req.(*QueryRateLimitsByChannelOrClientIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllBlacklistedDenoms_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllBlacklistedDenomsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllBlacklistedDenoms(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/AllBlacklistedDenoms", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllBlacklistedDenoms(ctx, req.(*QueryAllBlacklistedDenomsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllWhitelistedAddresses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllWhitelistedAddressesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/AllWhitelistedAddresses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, req.(*QueryAllWhitelistedAddressesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var 
Query_serviceDesc = _Query_serviceDesc +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibc.applications.rate_limiting.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllRateLimits", + Handler: _Query_AllRateLimits_Handler, + }, + { + MethodName: "RateLimit", + Handler: _Query_RateLimit_Handler, + }, + { + MethodName: "RateLimitsByChainID", + Handler: _Query_RateLimitsByChainID_Handler, + }, + { + MethodName: "RateLimitsByChannelOrClientID", + Handler: _Query_RateLimitsByChannelOrClientID_Handler, + }, + { + MethodName: "AllBlacklistedDenoms", + Handler: _Query_AllBlacklistedDenoms_Handler, + }, + { + MethodName: "AllWhitelistedAddresses", + Handler: _Query_AllWhitelistedAddresses_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibc/applications/rate_limiting/v1/query.proto", +} + +func (m *QueryAllRateLimitsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllRateLimitsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx 
>= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RateLimit != nil { + { + size, err := m.RateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIDResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIDResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIDResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllBlacklistedDenomsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllBlacklistedDenomsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllBlacklistedDenomsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllBlacklistedDenomsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*QueryAllBlacklistedDenomsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllBlacklistedDenomsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Denoms) > 0 { + for iNdEx := len(m.Denoms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Denoms[iNdEx]) + copy(dAtA[i:], m.Denoms[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denoms[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AddressPairs) > 0 { + for iNdEx := len(m.AddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + 
// encodeVarintQuery writes v as a protobuf varint ending just before offset
// in dAtA (generated marshaling fills the buffer back-to-front) and returns
// the new start offset of the encoded data.
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
	offset -= sovQuery(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// Size returns the encoded wire size in bytes (zero fields: always 0).
func (m *QueryAllRateLimitsRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryAllRateLimitsResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.RateLimits) > 0 {
		for _, e := range m.RateLimits {
			l = e.Size()
			// 1 byte tag + varint length prefix + payload per element.
			n += 1 + l + sovQuery(uint64(l))
		}
	}
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryRateLimitRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Denom)
	if l > 0 {
		n += 1 + l + sovQuery(uint64(l))
	}
	l = len(m.ChannelOrClientId)
	if l > 0 {
		n += 1 + l + sovQuery(uint64(l))
	}
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryRateLimitResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.RateLimit != nil {
		l = m.RateLimit.Size()
		n += 1 + l + sovQuery(uint64(l))
	}
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryRateLimitsByChainIDRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.ChainId)
	if l > 0 {
		n += 1 + l + sovQuery(uint64(l))
	}
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryRateLimitsByChainIDResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.RateLimits) > 0 {
		for _, e := range m.RateLimits {
			l = e.Size()
			n += 1 + l + sovQuery(uint64(l))
		}
	}
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryRateLimitsByChannelOrClientIDRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.ChannelOrClientId)
	if l > 0 {
		n += 1 + l + sovQuery(uint64(l))
	}
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryRateLimitsByChannelOrClientIDResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.RateLimits) > 0 {
		for _, e := range m.RateLimits {
			l = e.Size()
			n += 1 + l + sovQuery(uint64(l))
		}
	}
	return n
}

// Size returns the encoded wire size in bytes (zero fields: always 0).
func (m *QueryAllBlacklistedDenomsRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryAllBlacklistedDenomsResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Denoms) > 0 {
		for _, s := range m.Denoms {
			l = len(s)
			n += 1 + l + sovQuery(uint64(l))
		}
	}
	return n
}

// Size returns the encoded wire size in bytes (zero fields: always 0).
func (m *QueryAllWhitelistedAddressesRequest) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	return n
}

// Size returns the encoded wire size in bytes.
func (m *QueryAllWhitelistedAddressesResponse) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.AddressPairs) > 0 {
		for _, e := range m.AddressPairs {
			l = e.Size()
			n += 1 + l + sovQuery(uint64(l))
		}
	}
	return n
}

// sovQuery returns the number of bytes needed to varint-encode x.
func sovQuery(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}

// sozQuery returns the varint size of x after zigzag encoding (sint fields).
func sozQuery(x uint64) (n int) {
	return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// Unmarshal decodes dAtA into m. The message declares no fields, so every
// well-formed tag is skipped via skipQuery; malformed input yields an error.
func (m *QueryAllRateLimitsRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Decode the field tag varint.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryAllRateLimitsRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryAllRateLimitsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// The Unmarshal methods below are generated by protoc-gen-gogo. They all
// follow the same shape: read a varint tag, dispatch on field number,
// length-check before slicing, and skip unknown fields. DO NOT EDIT by
// hand — regenerate from the .proto instead.

// Unmarshal decodes field 1 (repeated RateLimit rate_limits) into m.
func (m *QueryAllRateLimitsResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryAllRateLimitsResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryAllRateLimitsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RateLimits = append(m.RateLimits, RateLimit{})
			if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (string denom) and field 2
// (string channel_or_client_id) into m.
func (m *QueryRateLimitRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryRateLimitRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryRateLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Denom = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ChannelOrClientId = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (optional RateLimit rate_limit) into m,
// allocating m.RateLimit on first use.
func (m *QueryRateLimitResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryRateLimitResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RateLimit", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.RateLimit == nil {
				m.RateLimit = &RateLimit{}
			}
			if err := m.RateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (string chain_id) into m.
func (m *QueryRateLimitsByChainIDRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryRateLimitsByChainIDRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryRateLimitsByChainIDRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ChainId = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (repeated RateLimit rate_limits) into m.
func (m *QueryRateLimitsByChainIDResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryRateLimitsByChainIDResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryRateLimitsByChainIDResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RateLimits = append(m.RateLimits, RateLimit{})
			if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (string channel_or_client_id) into m.
func (m *QueryRateLimitsByChannelOrClientIDRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ChannelOrClientId = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (repeated RateLimit rate_limits) into m.
func (m *QueryRateLimitsByChannelOrClientIDResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RateLimits = append(m.RateLimits, RateLimit{})
			if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the wire bytes into m (no fields; unknowns are skipped).
func (m *QueryAllBlacklistedDenomsRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryAllBlacklistedDenomsRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryAllBlacklistedDenomsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (repeated string denoms) into m.
func (m *QueryAllBlacklistedDenomsResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryAllBlacklistedDenomsResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryAllBlacklistedDenomsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Denoms", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Denoms = append(m.Denoms, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes the wire bytes into m (no fields; unknowns are skipped).
func (m *QueryAllWhitelistedAddressesRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// Unmarshal decodes field 1 (repeated WhitelistedAddressPair address_pairs)
// into m.
func (m *QueryAllWhitelistedAddressesResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AddressPairs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthQuery
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthQuery
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AddressPairs = append(m.AddressPairs, WhitelistedAddressPair{})
			if err := m.AddressPairs[len(m.AddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipQuery(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthQuery
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}

// skipQuery advances past one complete field (tag + payload) starting at
// dAtA[0] and returns the number of bytes consumed. It tracks group nesting
// depth so legacy start/end-group wire types are skipped as a unit.
func skipQuery(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowQuery
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// varint: consume continuation bytes until the final one.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// fixed64
			iNdEx += 8
		case 2:
			// length-delimited: read the length varint, then skip that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowQuery
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthQuery
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupQuery
			}
			depth--
		case 5:
			// fixed32
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthQuery
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

// Sentinel errors shared by the generated unmarshal/skip code above.
var (
	ErrInvalidLengthQuery        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowQuery          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
)
diff --git a/modules/apps/rate-limiting/types/query.pb.gw.go b/modules/apps/rate-limiting/types/query.pb.gw.go
new file mode 100644
index 00000000000..1d6baa9b321
--- /dev/null
+++ b/modules/apps/rate-limiting/types/query.pb.gw.go
@@ -0,0 +1,604 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: ibc/applications/rate_limiting/v1/query.proto

/*
Package types is a reverse proxy.

It translates gRPC into RESTful JSON APIs.
*/
package types

import (
	"context"
	"io"
	"net/http"

	"github.com/golang/protobuf/descriptor"
	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = descriptor.ForMessage
var _ = metadata.Join

// request_Query_AllRateLimits_0 forwards the HTTP request to a remote
// gRPC QueryClient; the request message has no fields to populate.
func request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAllRateLimitsRequest
	var metadata runtime.ServerMetadata

	msg, err := client.AllRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

// local_request_Query_AllRateLimits_0 invokes an in-process QueryServer
// directly (no gRPC hop).
func local_request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAllRateLimitsRequest
	var metadata runtime.ServerMetadata

	msg, err := server.AllRateLimits(ctx, &protoReq)
	return msg, metadata, err

}

// filter_Query_RateLimit_0 excludes the path-bound channel_or_client_id
// field when populating remaining fields from URL query parameters.
var (
	filter_Query_RateLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_or_client_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
)

// request_Query_RateLimit_0 binds the channel_or_client_id path parameter
// plus query-string parameters, then calls the remote RateLimit RPC.
func request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryRateLimitRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["channel_or_client_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id")
	}

	protoReq.ChannelOrClientId, err = runtime.String(val)

	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err)
	}

	if err := req.ParseForm(); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.RateLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

// local_request_Query_RateLimit_0 is the in-process variant of the above.
func local_request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryRateLimitRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["channel_or_client_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id")
	}

	protoReq.ChannelOrClientId, err = runtime.String(val)

	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err)
	}

	if err := req.ParseForm(); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := server.RateLimit(ctx, &protoReq)
	return msg, metadata, err

}

// request_Query_RateLimitsByChainID_0 binds the chain_id path parameter and
// calls the remote RateLimitsByChainID RPC.
func request_Query_RateLimitsByChainID_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryRateLimitsByChainIDRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["chain_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
	}

	protoReq.ChainId, err = runtime.String(val)

	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
	}

	msg, err := client.RateLimitsByChainID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

// local_request_Query_RateLimitsByChainID_0 is the in-process variant.
func local_request_Query_RateLimitsByChainID_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryRateLimitsByChainIDRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["chain_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id")
	}

	protoReq.ChainId, err = runtime.String(val)

	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err)
	}

	msg, err := server.RateLimitsByChainID(ctx, &protoReq)
	return msg, metadata, err

}

// request_Query_RateLimitsByChannelOrClientID_0 binds the
// channel_or_client_id path parameter and calls the remote RPC.
func request_Query_RateLimitsByChannelOrClientID_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryRateLimitsByChannelOrClientIDRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["channel_or_client_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id")
	}

	protoReq.ChannelOrClientId, err = runtime.String(val)

	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err)
	}

	msg, err := client.RateLimitsByChannelOrClientID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

// local_request_Query_RateLimitsByChannelOrClientID_0 is the in-process
// variant of the above.
func local_request_Query_RateLimitsByChannelOrClientID_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryRateLimitsByChannelOrClientIDRequest
	var metadata runtime.ServerMetadata

	var (
		val string
		ok  bool
		err error
		_   = err
	)

	val, ok = pathParams["channel_or_client_id"]
	if !ok {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id")
	}

	protoReq.ChannelOrClientId, err = runtime.String(val)

	if err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err)
	}

	msg, err := server.RateLimitsByChannelOrClientID(ctx, &protoReq)
	return msg, metadata, err

}

// request_Query_AllBlacklistedDenoms_0 forwards to the remote RPC; the
// request message has no fields to populate.
func request_Query_AllBlacklistedDenoms_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAllBlacklistedDenomsRequest
	var metadata runtime.ServerMetadata

	msg, err := client.AllBlacklistedDenoms(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

// local_request_Query_AllBlacklistedDenoms_0 is the in-process variant.
func local_request_Query_AllBlacklistedDenoms_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAllBlacklistedDenomsRequest
	var metadata runtime.ServerMetadata

	msg, err := server.AllBlacklistedDenoms(ctx, &protoReq)
	return msg, metadata, err

}

// request_Query_AllWhitelistedAddresses_0 forwards to the remote RPC; the
// request message has no fields to populate.
func request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAllWhitelistedAddressesRequest
	var metadata runtime.ServerMetadata

	msg, err := client.AllWhitelistedAddresses(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

// local_request_Query_AllWhitelistedAddresses_0 is the in-process variant.
func local_request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAllWhitelistedAddressesRequest
	var metadata runtime.ServerMetadata

	msg, err := server.AllWhitelistedAddresses(ctx, &protoReq)
	return msg, metadata, err

}

// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
// UnaryRPC :call QueryServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllRateLimits_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChainID_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelOrClientID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChannelOrClientID_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelOrClientID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AllBlacklistedDenoms_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllBlacklistedDenoms_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllBlacklistedDenoms_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllRateLimits_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChainID_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelOrClientID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChannelOrClientID_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelOrClientID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllBlacklistedDenoms_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllBlacklistedDenoms_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllBlacklistedDenoms_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_AllRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimits"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "channel_or_client_id", "by_denom"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChainID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "ratelimits", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChannelOrClientID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "ratelimits", "channel_or_client_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllBlacklistedDenoms_0 = runtime.MustPattern(runtime.NewPattern(1, 
[]int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "blacklisted_denoms"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllWhitelistedAddresses_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "whitelisted_addresses"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_AllRateLimits_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimit_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChainID_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChannelOrClientID_0 = runtime.ForwardResponseMessage + + forward_Query_AllBlacklistedDenoms_0 = runtime.ForwardResponseMessage + + forward_Query_AllWhitelistedAddresses_0 = runtime.ForwardResponseMessage +) diff --git a/modules/apps/rate-limiting/types/quota.go b/modules/apps/rate-limiting/types/quota.go new file mode 100644 index 00000000000..ac89e1713c4 --- /dev/null +++ b/modules/apps/rate-limiting/types/quota.go @@ -0,0 +1,23 @@ +package types + +import ( + sdkmath "cosmossdk.io/math" +) + +// CheckExceedsQuota checks if new in/out flow is going to reach the max in/out or not +func (q *Quota) CheckExceedsQuota(direction PacketDirection, amount sdkmath.Int, totalValue sdkmath.Int) bool { + // If there's no channel value (this should be almost impossible), it means there is no + // supply of the asset, so we shouldn't prevent inflows/outflows + if totalValue.IsZero() { + return false + } + var threshold sdkmath.Int + if direction == PACKET_RECV { + threshold = totalValue.Mul(q.MaxPercentRecv).Quo(sdkmath.NewInt(100)) + } else { + threshold = totalValue.Mul(q.MaxPercentSend).Quo(sdkmath.NewInt(100)) + } + + // Revert to GT check as in the original reference module + return amount.GT(threshold) +} diff --git a/modules/apps/rate-limiting/types/quota_test.go b/modules/apps/rate-limiting/types/quota_test.go new 
file mode 100644 index 00000000000..d441b7609b9 --- /dev/null +++ b/modules/apps/rate-limiting/types/quota_test.go @@ -0,0 +1,80 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestCheckExceedsQuota(t *testing.T) { + totalValue := sdkmath.NewInt(100) + amountUnderThreshold := sdkmath.NewInt(5) + amountOverThreshold := sdkmath.NewInt(15) + quota := types.Quota{ + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(10), + DurationHours: uint64(1), + } + + tests := []struct { + name string + direction types.PacketDirection + amount sdkmath.Int + totalValue sdkmath.Int + exceeded bool + }{ + { + name: "inflow exceeded threshold", + direction: types.PACKET_RECV, + amount: amountOverThreshold, + totalValue: totalValue, + exceeded: true, + }, + { + name: "inflow did not exceed threshold", + direction: types.PACKET_RECV, + amount: amountUnderThreshold, + totalValue: totalValue, + exceeded: false, + }, + { + name: "outflow exceeded threshold", + direction: types.PACKET_SEND, + amount: amountOverThreshold, + totalValue: totalValue, + exceeded: true, + }, + { + name: "outflow did not exceed threshold", + direction: types.PACKET_SEND, + amount: amountUnderThreshold, + totalValue: totalValue, + exceeded: false, + }, + { + name: "zero channel value send", + direction: types.PACKET_SEND, + amount: amountOverThreshold, + totalValue: sdkmath.ZeroInt(), + exceeded: false, + }, + { + name: "zero channel value recv", + direction: types.PACKET_RECV, + amount: amountOverThreshold, + totalValue: sdkmath.ZeroInt(), + exceeded: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := quota.CheckExceedsQuota(test.direction, test.amount, test.totalValue) + require.Equal(t, res, test.exceeded, "test: %s", test.name) + }) + } +} diff --git 
a/modules/apps/rate-limiting/types/rate_limiting.pb.go b/modules/apps/rate-limiting/types/rate_limiting.pb.go new file mode 100644 index 00000000000..b6f43336a86 --- /dev/null +++ b/modules/apps/rate-limiting/types/rate_limiting.pb.go @@ -0,0 +1,1779 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/rate_limiting.proto + +package types + +import ( + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PacketDirection defines whether the transfer packet is being sent from +// this chain or is being received on this chain +type PacketDirection int32 + +const ( + PACKET_SEND PacketDirection = 0 + PACKET_RECV PacketDirection = 1 +) + +var PacketDirection_name = map[int32]string{ + 0: "PACKET_SEND", + 1: "PACKET_RECV", +} + +var PacketDirection_value = map[string]int32{ + "PACKET_SEND": 0, + "PACKET_RECV": 1, +} + +func (x PacketDirection) String() string { + return proto.EnumName(PacketDirection_name, int32(x)) +} + +func (PacketDirection) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{0} +} + +// Path holds the denom and channelID that define the rate limited route +type Path struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelOrClientId string `protobuf:"bytes,2,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{0} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(m, src) +} +func (m *Path) XXX_Size() int { + return m.Size() +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetDenom() string { + if m != nil { + return 
m.Denom + } + return "" +} + +func (m *Path) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// Quota defines the rate limit thresholds for transfer packets +type Quota struct { + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentSend cosmossdk_io_math.Int `protobuf:"bytes,1,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_send"` + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentRecv cosmossdk_io_math.Int `protobuf:"bytes,2,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_recv"` + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + DurationHours uint64 `protobuf:"varint,3,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *Quota) Reset() { *m = Quota{} } +func (m *Quota) String() string { return proto.CompactTextString(m) } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{1} +} +func (m *Quota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quota.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quota) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quota.Merge(m, src) +} +func (m *Quota) XXX_Size() int { + return m.Size() +} +func (m *Quota) XXX_DiscardUnknown() { + xxx_messageInfo_Quota.DiscardUnknown(m) +} + +var xxx_messageInfo_Quota proto.InternalMessageInfo + +func 
(m *Quota) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +// Flow tracks all the inflows and outflows of a channel. +type Flow struct { + // Inflow defines the total amount of inbound transfers for the given + // rate limit in the current window + Inflow cosmossdk_io_math.Int `protobuf:"bytes,1,opt,name=inflow,proto3,customtype=cosmossdk.io/math.Int" json:"inflow"` + // Outflow defines the total amount of outbound transfers for the given + // rate limit in the current window + Outflow cosmossdk_io_math.Int `protobuf:"bytes,2,opt,name=outflow,proto3,customtype=cosmossdk.io/math.Int" json:"outflow"` + // ChannelValue stores the total supply of the denom at the start of + // the rate limit. This is used as the denominator when checking + // the rate limit threshold + // The ChannelValue is fixed for the duration of the rate limit window + ChannelValue cosmossdk_io_math.Int `protobuf:"bytes,3,opt,name=channel_value,json=channelValue,proto3,customtype=cosmossdk.io/math.Int" json:"channel_value"` +} + +func (m *Flow) Reset() { *m = Flow{} } +func (m *Flow) String() string { return proto.CompactTextString(m) } +func (*Flow) ProtoMessage() {} +func (*Flow) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{2} +} +func (m *Flow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Flow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Flow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Flow.Merge(m, src) +} +func (m *Flow) XXX_Size() int { + return m.Size() +} +func (m *Flow) XXX_DiscardUnknown() { + xxx_messageInfo_Flow.DiscardUnknown(m) +} + +var xxx_messageInfo_Flow proto.InternalMessageInfo + +// RateLimit stores all the context about a given rate 
limit, including +// the relevant denom and channel, rate limit thresholds, and current +// progress towards the limits +type RateLimit struct { + Path *Path `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Quota *Quota `protobuf:"bytes,2,opt,name=quota,proto3" json:"quota,omitempty"` + Flow *Flow `protobuf:"bytes,3,opt,name=flow,proto3" json:"flow,omitempty"` +} + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (m *RateLimit) String() string { return proto.CompactTextString(m) } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{3} +} +func (m *RateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) +} +func (m *RateLimit) XXX_Size() int { + return m.Size() +} +func (m *RateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimit proto.InternalMessageInfo + +func (m *RateLimit) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +func (m *RateLimit) GetQuota() *Quota { + if m != nil { + return m.Quota + } + return nil +} + +func (m *RateLimit) GetFlow() *Flow { + if m != nil { + return m.Flow + } + return nil +} + +// WhitelistedAddressPair represents a sender-receiver combo that is +// not subject to rate limit restrictions +type WhitelistedAddressPair struct { + Sender string `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` + Receiver string `protobuf:"bytes,2,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *WhitelistedAddressPair) Reset() { *m 
= WhitelistedAddressPair{} } +func (m *WhitelistedAddressPair) String() string { return proto.CompactTextString(m) } +func (*WhitelistedAddressPair) ProtoMessage() {} +func (*WhitelistedAddressPair) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{4} +} +func (m *WhitelistedAddressPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WhitelistedAddressPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WhitelistedAddressPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WhitelistedAddressPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_WhitelistedAddressPair.Merge(m, src) +} +func (m *WhitelistedAddressPair) XXX_Size() int { + return m.Size() +} +func (m *WhitelistedAddressPair) XXX_DiscardUnknown() { + xxx_messageInfo_WhitelistedAddressPair.DiscardUnknown(m) +} + +var xxx_messageInfo_WhitelistedAddressPair proto.InternalMessageInfo + +func (m *WhitelistedAddressPair) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *WhitelistedAddressPair) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +// HourEpoch is the epoch type. 
+type HourEpoch struct { + EpochNumber uint64 `protobuf:"varint,1,opt,name=epoch_number,json=epochNumber,proto3" json:"epoch_number,omitempty"` + Duration time.Duration `protobuf:"bytes,2,opt,name=duration,proto3,stdduration" json:"duration,omitempty"` + EpochStartTime time.Time `protobuf:"bytes,3,opt,name=epoch_start_time,json=epochStartTime,proto3,stdtime" json:"epoch_start_time"` + EpochStartHeight int64 `protobuf:"varint,4,opt,name=epoch_start_height,json=epochStartHeight,proto3" json:"epoch_start_height,omitempty"` +} + +func (m *HourEpoch) Reset() { *m = HourEpoch{} } +func (m *HourEpoch) String() string { return proto.CompactTextString(m) } +func (*HourEpoch) ProtoMessage() {} +func (*HourEpoch) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{5} +} +func (m *HourEpoch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HourEpoch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HourEpoch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HourEpoch) XXX_Merge(src proto.Message) { + xxx_messageInfo_HourEpoch.Merge(m, src) +} +func (m *HourEpoch) XXX_Size() int { + return m.Size() +} +func (m *HourEpoch) XXX_DiscardUnknown() { + xxx_messageInfo_HourEpoch.DiscardUnknown(m) +} + +var xxx_messageInfo_HourEpoch proto.InternalMessageInfo + +func (m *HourEpoch) GetEpochNumber() uint64 { + if m != nil { + return m.EpochNumber + } + return 0 +} + +func (m *HourEpoch) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *HourEpoch) GetEpochStartTime() time.Time { + if m != nil { + return m.EpochStartTime + } + return time.Time{} +} + +func (m *HourEpoch) GetEpochStartHeight() int64 { + if m != nil { + return m.EpochStartHeight + } + return 0 +} + +func init() { + 
proto.RegisterEnum("ibc.applications.rate_limiting.v1.PacketDirection", PacketDirection_name, PacketDirection_value) + proto.RegisterType((*Path)(nil), "ibc.applications.rate_limiting.v1.Path") + proto.RegisterType((*Quota)(nil), "ibc.applications.rate_limiting.v1.Quota") + proto.RegisterType((*Flow)(nil), "ibc.applications.rate_limiting.v1.Flow") + proto.RegisterType((*RateLimit)(nil), "ibc.applications.rate_limiting.v1.RateLimit") + proto.RegisterType((*WhitelistedAddressPair)(nil), "ibc.applications.rate_limiting.v1.WhitelistedAddressPair") + proto.RegisterType((*HourEpoch)(nil), "ibc.applications.rate_limiting.v1.HourEpoch") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/rate_limiting.proto", fileDescriptor_bf22d2adece00654) +} + +var fileDescriptor_bf22d2adece00654 = []byte{ + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0xde, 0xc2, 0x82, 0x30, 0xcb, 0x8f, 0x75, 0x82, 0x64, 0xdd, 0xc4, 0x2e, 0x6c, 0x62, 0xdc, + 0x18, 0x68, 0x05, 0x43, 0x8c, 0x31, 0x9a, 0xb0, 0xb0, 0x0a, 0x11, 0x71, 0x2d, 0x08, 0x89, 0x97, + 0x66, 0x76, 0x3a, 0xb4, 0x13, 0xda, 0x4e, 0x9d, 0x4e, 0x17, 0x38, 0x7b, 0xf1, 0xc8, 0xd1, 0xbb, + 0xff, 0x85, 0x27, 0x2f, 0x26, 0x1c, 0x39, 0x1a, 0x0f, 0x68, 0xe0, 0xe6, 0x5f, 0x61, 0x66, 0xda, + 0x22, 0xe0, 0x81, 0xf5, 0x36, 0xf3, 0xde, 0xf7, 0x7d, 0xf3, 0xe6, 0xcd, 0xf7, 0x06, 0x2c, 0xd0, + 0x0e, 0x36, 0x51, 0x14, 0xf9, 0x14, 0x23, 0x41, 0x59, 0x18, 0x9b, 0x1c, 0x09, 0x62, 0xfb, 0x34, + 0xa0, 0x82, 0x86, 0xae, 0xd9, 0x9d, 0xbb, 0x1c, 0x30, 0x22, 0xce, 0x04, 0x83, 0xd3, 0xb4, 0x83, + 0x8d, 0x8b, 0x34, 0xe3, 0x32, 0xaa, 0x3b, 0x57, 0x9d, 0x70, 0x99, 0xcb, 0x14, 0xda, 0x94, 0xab, + 0x94, 0x58, 0xd5, 0x5d, 0xc6, 0x5c, 0x9f, 0x98, 0x6a, 0xd7, 0x49, 0x76, 0x4c, 0x27, 0xe1, 0x4a, + 0x21, 0xcb, 0xd7, 0xae, 0xe6, 0x05, 0x0d, 0x48, 0x2c, 0x50, 0x10, 0xa5, 0x80, 0xfa, 0x2b, 0x50, + 0x6c, 0x23, 0xe1, 0xc1, 0x09, 0x30, 0xe0, 0x90, 
0x90, 0x05, 0x15, 0x6d, 0x4a, 0x6b, 0x0c, 0x5b, + 0xe9, 0x06, 0x9a, 0x60, 0x02, 0x7b, 0x28, 0x0c, 0x89, 0x6f, 0x33, 0x6e, 0x63, 0x9f, 0x92, 0x50, + 0xd8, 0xd4, 0xa9, 0xf4, 0x29, 0xd0, 0xcd, 0x2c, 0xf7, 0x9a, 0x2f, 0xa9, 0xcc, 0xaa, 0x53, 0xff, + 0xaa, 0x81, 0x81, 0x37, 0x09, 0x13, 0x08, 0xbe, 0x00, 0xe5, 0x00, 0xed, 0xdb, 0x11, 0xe1, 0x58, + 0x92, 0x62, 0x12, 0x3a, 0xa9, 0x76, 0xf3, 0xce, 0xd1, 0x49, 0xad, 0xf0, 0xe3, 0xa4, 0x76, 0x0b, + 0xb3, 0x38, 0x60, 0x71, 0xec, 0xec, 0x1a, 0x94, 0x99, 0x01, 0x12, 0x9e, 0xb1, 0x1a, 0x0a, 0x6b, + 0x2c, 0x40, 0xfb, 0xed, 0x94, 0xb5, 0x41, 0x42, 0xe7, 0xaa, 0x10, 0x27, 0xb8, 0x9b, 0x9e, 0xff, + 0x1f, 0x42, 0x16, 0xc1, 0x5d, 0x78, 0x17, 0x8c, 0xe5, 0xdd, 0xb1, 0x3d, 0x96, 0xf0, 0xb8, 0xd2, + 0x3f, 0xa5, 0x35, 0x8a, 0xd6, 0x68, 0x1e, 0x5d, 0x91, 0xc1, 0xfa, 0x17, 0x0d, 0x14, 0x9f, 0xfb, + 0x6c, 0x0f, 0x2e, 0x80, 0x41, 0x1a, 0xee, 0xf8, 0x6c, 0xaf, 0xb7, 0xba, 0x33, 0x30, 0x7c, 0x04, + 0x6e, 0xb0, 0x44, 0x28, 0x5e, 0x4f, 0x65, 0xe6, 0x68, 0xd8, 0x04, 0xa3, 0x79, 0xb3, 0xbb, 0xc8, + 0x4f, 0x88, 0x2a, 0xef, 0x5a, 0xfa, 0x48, 0xc6, 0xd9, 0x92, 0x94, 0xfa, 0x37, 0x0d, 0x0c, 0x5b, + 0x48, 0x90, 0x35, 0xe9, 0x1c, 0xf8, 0x04, 0x14, 0x23, 0x24, 0x3c, 0x55, 0x7f, 0x69, 0xfe, 0x9e, + 0x71, 0xad, 0xcb, 0x0c, 0xe9, 0x05, 0x4b, 0x91, 0xe0, 0x33, 0x30, 0xf0, 0x5e, 0xbe, 0xa4, 0xba, + 0x45, 0x69, 0xbe, 0xd1, 0x03, 0x5b, 0xbd, 0xbc, 0x95, 0xd2, 0xe4, 0xe1, 0xaa, 0x09, 0xfd, 0x3d, + 0x1f, 0x2e, 0xbb, 0x6e, 0x29, 0x52, 0x7d, 0x0d, 0x4c, 0x6e, 0x7b, 0x54, 0x10, 0x9f, 0xc6, 0x82, + 0x38, 0x8b, 0x8e, 0xc3, 0x49, 0x1c, 0xb7, 0x11, 0xe5, 0x70, 0x12, 0x0c, 0x4a, 0x2f, 0x11, 0x9e, + 0x39, 0x35, 0xdb, 0xc1, 0x2a, 0x18, 0xe2, 0x04, 0x13, 0xda, 0x25, 0x3c, 0xb3, 0xe7, 0xf9, 0xbe, + 0xfe, 0xa1, 0x0f, 0x0c, 0xcb, 0xc7, 0x6d, 0x45, 0x0c, 0x7b, 0x70, 0x1a, 0x8c, 0x10, 0xb9, 0xb0, + 0xc3, 0x24, 0xe8, 0x64, 0x3a, 0x45, 0xab, 0xa4, 0x62, 0xeb, 0x2a, 0x04, 0xdf, 0x82, 0xa1, 0xdc, + 0x14, 0xd9, 0xf5, 0x6f, 0x1b, 0xe9, 0x24, 0x19, 0xf9, 0x24, 0x19, 0xcb, 0x19, 0xa0, 
0xa9, 0xcb, + 0x07, 0xfa, 0x7d, 0x52, 0x83, 0x39, 0x65, 0x86, 0x05, 0x54, 0x90, 0x20, 0x12, 0x07, 0x9f, 0x7e, + 0xd6, 0x34, 0xeb, 0x5c, 0x0a, 0xae, 0x83, 0x72, 0x7a, 0x72, 0x2c, 0x10, 0x17, 0xb6, 0x9c, 0xc5, + 0xac, 0x3d, 0xd5, 0x7f, 0xe4, 0x37, 0xf3, 0x41, 0x6d, 0x0e, 0x49, 0xfd, 0x43, 0xa9, 0x34, 0xa6, + 0xd8, 0x1b, 0x92, 0x2c, 0xd3, 0x70, 0x06, 0xc0, 0x8b, 0x7a, 0x1e, 0xa1, 0xae, 0x27, 0x2a, 0xc5, + 0x29, 0xad, 0xd1, 0x6f, 0x95, 0xff, 0x62, 0x57, 0x54, 0xfc, 0xfe, 0x63, 0x30, 0xde, 0x46, 0x78, + 0x97, 0x88, 0x65, 0xca, 0x09, 0x56, 0x05, 0x8d, 0x83, 0x52, 0x7b, 0x71, 0xe9, 0x65, 0x6b, 0xd3, + 0xde, 0x68, 0xad, 0x2f, 0x97, 0x0b, 0x17, 0x02, 0x56, 0x6b, 0x69, 0xab, 0xac, 0x55, 0x8b, 0x1f, + 0x3f, 0xeb, 0x85, 0xe6, 0xf6, 0xd1, 0xa9, 0xae, 0x1d, 0x9f, 0xea, 0xda, 0xaf, 0x53, 0x5d, 0x3b, + 0x3c, 0xd3, 0x0b, 0xc7, 0x67, 0x7a, 0xe1, 0xfb, 0x99, 0x5e, 0x78, 0xf7, 0xd4, 0xa5, 0xc2, 0x4b, + 0x3a, 0x06, 0x66, 0x81, 0x99, 0x1a, 0xd4, 0xa4, 0x1d, 0x3c, 0xeb, 0x32, 0xb3, 0x3b, 0xf7, 0xc0, + 0x0c, 0x98, 0x93, 0xf8, 0x24, 0x96, 0x3f, 0x62, 0xfa, 0x13, 0xce, 0x9e, 0xff, 0x84, 0xe2, 0x20, + 0x22, 0x71, 0x67, 0x50, 0xdd, 0xf7, 0xe1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x14, 0x0f, + 0x18, 0x38, 0x05, 0x00, 0x00, +} + +func (m *Path) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Path) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Path) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintRateLimiting(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintRateLimiting(dAtA, i, 
uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Quota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintRateLimiting(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x18 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Flow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Flow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Flow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.ChannelValue.Size() + i -= size + if _, err := m.ChannelValue.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size := m.Outflow.Size() + i -= size + if _, err := m.Outflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := 
m.Inflow.Size() + i -= size + if _, err := m.Inflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flow != nil { + { + size, err := m.Flow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Quota != nil { + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Path != nil { + { + size, err := m.Path.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WhitelistedAddressPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WhitelistedAddressPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WhitelistedAddressPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintRateLimiting(dAtA, i, 
uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x12 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintRateLimiting(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HourEpoch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HourEpoch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HourEpoch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EpochStartHeight != 0 { + i = encodeVarintRateLimiting(dAtA, i, uint64(m.EpochStartHeight)) + i-- + dAtA[i] = 0x20 + } + n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.EpochStartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EpochStartTime):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintRateLimiting(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x1a + n5, err5 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintRateLimiting(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x12 + if m.EpochNumber != 0 { + i = encodeVarintRateLimiting(dAtA, i, uint64(m.EpochNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintRateLimiting(dAtA []byte, offset int, v uint64) int { + offset -= sovRateLimiting(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Path) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { 
+ n += 1 + l + sovRateLimiting(uint64(l)) + } + return n +} + +func (m *Quota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.MaxPercentSend.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovRateLimiting(uint64(m.DurationHours)) + } + return n +} + +func (m *Flow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Inflow.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + l = m.Outflow.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + l = m.ChannelValue.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + return n +} + +func (m *RateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + } + if m.Quota != nil { + l = m.Quota.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + } + if m.Flow != nil { + l = m.Flow.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + } + return n +} + +func (m *WhitelistedAddressPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + return n +} + +func (m *HourEpoch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNumber != 0 { + n += 1 + sovRateLimiting(uint64(m.EpochNumber)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovRateLimiting(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EpochStartTime) + n += 1 + l + sovRateLimiting(uint64(l)) + if m.EpochStartHeight != 0 { + n += 1 + sovRateLimiting(uint64(m.EpochStartHeight)) + } + return n +} + +func sovRateLimiting(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRateLimiting(x uint64) (n int) { + return sovRateLimiting(uint64((x 
<< 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Path) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Path: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Path: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Flow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Flow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Flow: illegal tag 
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Inflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Outflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ChannelValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Path == nil { + m.Path = &Path{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quota == nil { + m.Quota = &Quota{} + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flow == nil { + m.Flow = &Flow{} + } + if err := m.Flow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WhitelistedAddressPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WhitelistedAddressPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WhitelistedAddressPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HourEpoch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HourEpoch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HourEpoch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumber", wireType) + } + m.EpochNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.EpochStartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochStartHeight", wireType) + } + m.EpochStartHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochStartHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRateLimiting(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift 
+ if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRateLimiting + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRateLimiting + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRateLimiting + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRateLimiting = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRateLimiting = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRateLimiting = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/types/ratelimit.go b/modules/apps/rate-limiting/types/ratelimit.go new file mode 100644 index 00000000000..8b587870d0f --- /dev/null +++ b/modules/apps/rate-limiting/types/ratelimit.go @@ -0,0 +1,19 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (r *RateLimit) UpdateFlow(direction PacketDirection, amount sdkmath.Int) error { + switch direction { + case PACKET_SEND: + return r.Flow.AddOutflow(amount, *r.Quota) + case PACKET_RECV: + return r.Flow.AddInflow(amount, *r.Quota) + default: + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, 
"invalid packet direction (%s)", direction.String()) + } +} diff --git a/modules/apps/rate-limiting/types/ratelimit_test.go b/modules/apps/rate-limiting/types/ratelimit_test.go new file mode 100644 index 00000000000..7cb0465cf89 --- /dev/null +++ b/modules/apps/rate-limiting/types/ratelimit_test.go @@ -0,0 +1,20 @@ +package types_test + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestToLowerOnPacketDirection(t *testing.T) { + send := types.PACKET_SEND + lower := strings.ToLower(send.String()) + require.Equal(t, "packet_send", lower) + + recv := types.PACKET_RECV + lower = strings.ToLower(recv.String()) + require.Equal(t, "packet_recv", lower) +} diff --git a/modules/apps/rate-limiting/types/tx.pb.go b/modules/apps/rate-limiting/types/tx.pb.go new file mode 100644 index 00000000000..69264c1eb0d --- /dev/null +++ b/modules/apps/rate-limiting/types/tx.pb.go @@ -0,0 +1,2241 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/tx.proto + +package types + +import ( + context "context" + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Gov tx to add a new rate limit +type MsgAddRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentSend cosmossdk_io_math.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_send"` + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentRecv cosmossdk_io_math.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_recv"` + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 
24 indicates that the rate limit is reset each day) + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgAddRateLimit) Reset() { *m = MsgAddRateLimit{} } +func (m *MsgAddRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimit) ProtoMessage() {} +func (*MsgAddRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{0} +} +func (m *MsgAddRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimit.Merge(m, src) +} +func (m *MsgAddRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimit proto.InternalMessageInfo + +func (m *MsgAddRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgAddRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgAddRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +func (m *MsgAddRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +// MsgAddRateLimitResponse is the return type for AddRateLimit function. 
+type MsgAddRateLimitResponse struct { +} + +func (m *MsgAddRateLimitResponse) Reset() { *m = MsgAddRateLimitResponse{} } +func (m *MsgAddRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimitResponse) ProtoMessage() {} +func (*MsgAddRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{1} +} +func (m *MsgAddRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimitResponse.Merge(m, src) +} +func (m *MsgAddRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimitResponse proto.InternalMessageInfo + +// Gov tx to update an existing rate limit +type MsgUpdateRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 
10 indicates 10%) + MaxPercentSend cosmossdk_io_math.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_send"` + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentRecv cosmossdk_io_math.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_recv"` + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgUpdateRateLimit) Reset() { *m = MsgUpdateRateLimit{} } +func (m *MsgUpdateRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimit) ProtoMessage() {} +func (*MsgUpdateRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{2} +} +func (m *MsgUpdateRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimit.Merge(m, src) +} +func (m *MsgUpdateRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimit proto.InternalMessageInfo + +func (m *MsgUpdateRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDenom() string { + if 
m != nil { + return m.Denom + } + return "" +} + +func (m *MsgUpdateRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +// MsgUpdateRateLimitResponse is the return type for UpdateRateLimit. +type MsgUpdateRateLimitResponse struct { +} + +func (m *MsgUpdateRateLimitResponse) Reset() { *m = MsgUpdateRateLimitResponse{} } +func (m *MsgUpdateRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimitResponse) ProtoMessage() {} +func (*MsgUpdateRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{3} +} +func (m *MsgUpdateRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimitResponse.Merge(m, src) +} +func (m *MsgUpdateRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimitResponse proto.InternalMessageInfo + +// Gov tx to remove a rate limit +type MsgRemoveRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom 
string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *MsgRemoveRateLimit) Reset() { *m = MsgRemoveRateLimit{} } +func (m *MsgRemoveRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimit) ProtoMessage() {} +func (*MsgRemoveRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{4} +} +func (m *MsgRemoveRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimit.Merge(m, src) +} +func (m *MsgRemoveRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimit proto.InternalMessageInfo + +func (m *MsgRemoveRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgRemoveRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgRemoveRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// MsgRemoveRateLimitResponse is the response type for RemoveRateLimit +type MsgRemoveRateLimitResponse struct { +} + +func (m *MsgRemoveRateLimitResponse) Reset() { *m = MsgRemoveRateLimitResponse{} } +func (m *MsgRemoveRateLimitResponse) String() string { return 
proto.CompactTextString(m) } +func (*MsgRemoveRateLimitResponse) ProtoMessage() {} +func (*MsgRemoveRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{5} +} +func (m *MsgRemoveRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimitResponse.Merge(m, src) +} +func (m *MsgRemoveRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimitResponse proto.InternalMessageInfo + +// Gov tx to reset the flow on a rate limit +type MsgResetRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *MsgResetRateLimit) Reset() { *m = MsgResetRateLimit{} } +func (m *MsgResetRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimit) ProtoMessage() {} +func (*MsgResetRateLimit) Descriptor() ([]byte, []int) { + 
return fileDescriptor_5bbfc0abda512109, []int{6} +} +func (m *MsgResetRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimit.Merge(m, src) +} +func (m *MsgResetRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimit proto.InternalMessageInfo + +func (m *MsgResetRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgResetRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgResetRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// MsgResetRateLimitResponse is the response type for ResetRateLimit. 
+type MsgResetRateLimitResponse struct { +} + +func (m *MsgResetRateLimitResponse) Reset() { *m = MsgResetRateLimitResponse{} } +func (m *MsgResetRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimitResponse) ProtoMessage() {} +func (*MsgResetRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{7} +} +func (m *MsgResetRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimitResponse.Merge(m, src) +} +func (m *MsgResetRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimitResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgAddRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgAddRateLimit") + proto.RegisterType((*MsgAddRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgAddRateLimitResponse") + proto.RegisterType((*MsgUpdateRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgUpdateRateLimit") + proto.RegisterType((*MsgUpdateRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgUpdateRateLimitResponse") + proto.RegisterType((*MsgRemoveRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgRemoveRateLimit") + proto.RegisterType((*MsgRemoveRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgRemoveRateLimitResponse") + proto.RegisterType((*MsgResetRateLimit)(nil), 
"ibc.applications.rate_limiting.v1.MsgResetRateLimit") + proto.RegisterType((*MsgResetRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgResetRateLimitResponse") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/tx.proto", fileDescriptor_5bbfc0abda512109) +} + +var fileDescriptor_5bbfc0abda512109 = []byte{ + // 627 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xce, 0xf6, 0x17, 0x38, 0x68, 0x6b, 0x97, 0x48, 0x37, 0xdb, 0xba, 0xad, 0x01, 0xa1, 0x46, + 0xbb, 0xd3, 0x56, 0xbd, 0x14, 0x7b, 0x68, 0x3d, 0x68, 0xc1, 0xa2, 0x6c, 0x11, 0xc1, 0xcb, 0xb2, + 0xd9, 0x19, 0x36, 0x83, 0x99, 0x99, 0x65, 0x67, 0xb2, 0xc4, 0x8b, 0x88, 0x08, 0x82, 0x27, 0xff, + 0x0b, 0xaf, 0x39, 0x78, 0x11, 0xff, 0x81, 0x1e, 0x8b, 0x27, 0xf1, 0x50, 0x24, 0x39, 0xe4, 0xe6, + 0xc9, 0x3f, 0x40, 0xf6, 0x47, 0x42, 0x32, 0x41, 0x9a, 0xe6, 0xd4, 0x83, 0x97, 0x90, 0xf9, 0xde, + 0xfb, 0xde, 0x7e, 0x6f, 0xbe, 0x99, 0x79, 0xa0, 0x42, 0xaa, 0x3e, 0xf4, 0xc2, 0xb0, 0x4e, 0x7c, + 0x4f, 0x12, 0xce, 0x04, 0x8c, 0x3c, 0x89, 0xdd, 0x3a, 0xa1, 0x44, 0x12, 0x16, 0xc0, 0x78, 0x0b, + 0xca, 0xa6, 0x1d, 0x46, 0x5c, 0x72, 0xfd, 0x06, 0xa9, 0xfa, 0xf6, 0x60, 0xae, 0x3d, 0x94, 0x6b, + 0xc7, 0x5b, 0xe6, 0xa2, 0x47, 0x09, 0xe3, 0x30, 0xfd, 0xcd, 0x58, 0xe6, 0x92, 0xcf, 0x05, 0xe5, + 0x02, 0x52, 0x91, 0x56, 0xa3, 0x22, 0xc8, 0x03, 0xa5, 0x2c, 0xe0, 0xa6, 0x2b, 0x98, 0x2d, 0xf2, + 0x50, 0x31, 0xe0, 0x01, 0xcf, 0xf0, 0xe4, 0x5f, 0x86, 0x96, 0x7f, 0x4f, 0x81, 0x85, 0x43, 0x11, + 0xec, 0x21, 0xe4, 0x78, 0x12, 0x3f, 0x49, 0x3e, 0xab, 0x6f, 0x82, 0x39, 0x41, 0x02, 0x86, 0x23, + 0x43, 0x5b, 0xd3, 0xd6, 0x2f, 0xed, 0x1b, 0xdf, 0xbf, 0x6c, 0x14, 0xf3, 0x5a, 0x7b, 0x08, 0x45, + 0x58, 0x88, 0x23, 0x19, 0x11, 0x16, 0x38, 0x79, 0x9e, 0x5e, 0x04, 0xb3, 0x08, 0x33, 0x4e, 0x8d, + 0xa9, 0x84, 0xe0, 0x64, 0x0b, 0x1d, 0x82, 0xa2, 0x5f, 0xf3, 0x18, 0xc3, 0x75, 0x97, 0x47, 0xae, + 0x5f, 0x27, 0x98, 0x49, 0x97, 0x20, 
0x63, 0x3a, 0x4d, 0x5a, 0xcc, 0x63, 0x4f, 0xa3, 0x87, 0x69, + 0xe4, 0x00, 0xe9, 0x8f, 0xc0, 0x55, 0xea, 0x35, 0xdd, 0x10, 0x47, 0x7e, 0x92, 0x2a, 0x30, 0x43, + 0xc6, 0x4c, 0x2a, 0xe1, 0xfa, 0xf1, 0xe9, 0x6a, 0xe1, 0xe7, 0xe9, 0xea, 0xb5, 0x4c, 0x86, 0x40, + 0xaf, 0x6c, 0xc2, 0x21, 0xf5, 0x64, 0xcd, 0x3e, 0x60, 0xd2, 0x99, 0xa7, 0x5e, 0xf3, 0x59, 0xc6, + 0x3a, 0xc2, 0x6c, 0xa4, 0x50, 0x84, 0xfd, 0xd8, 0x98, 0x3d, 0x67, 0x21, 0x07, 0xfb, 0xb1, 0x7e, + 0x13, 0xcc, 0xa3, 0x46, 0x94, 0x3a, 0xe3, 0xd6, 0x78, 0x23, 0x12, 0xc6, 0xdc, 0x9a, 0xb6, 0x3e, + 0xe3, 0x5c, 0xe9, 0xa1, 0x8f, 0x13, 0x70, 0xe7, 0xd6, 0xbb, 0x6e, 0xab, 0x92, 0x6f, 0xc6, 0xc7, + 0x6e, 0xab, 0x52, 0x4a, 0x4c, 0x4c, 0x3d, 0x84, 0xca, 0xe6, 0x96, 0x4b, 0x60, 0x49, 0x81, 0x1c, + 0x2c, 0x42, 0xce, 0x04, 0x2e, 0xff, 0x99, 0x02, 0xfa, 0xa1, 0x08, 0x9e, 0x87, 0xc8, 0x93, 0xf8, + 0xbf, 0x1d, 0x93, 0xdb, 0x71, 0x47, 0xb1, 0x63, 0x65, 0xc8, 0x0e, 0x65, 0x7f, 0xcb, 0x2b, 0xc0, + 0x1c, 0x45, 0xfb, 0xa6, 0x7c, 0xd3, 0x52, 0x53, 0x1c, 0x4c, 0x79, 0x7c, 0x01, 0x4c, 0x39, 0xa3, + 0x37, 0x45, 0x66, 0xde, 0x9b, 0x82, 0xf6, 0x7b, 0xfb, 0xaa, 0x81, 0xc5, 0x34, 0x2c, 0xb0, 0xbc, + 0x00, 0xad, 0xdd, 0x56, 0x5a, 0x5b, 0x56, 0x5a, 0x1b, 0x54, 0x59, 0x5e, 0x06, 0xa5, 0x11, 0xb0, + 0xd7, 0xd8, 0xf6, 0xe7, 0x19, 0x30, 0x7d, 0x28, 0x02, 0xfd, 0x0d, 0xb8, 0x3c, 0xf4, 0xb2, 0x6d, + 0xdb, 0x67, 0x3e, 0xb7, 0xb6, 0x72, 0x3b, 0xcd, 0x9d, 0xf3, 0x73, 0x7a, 0x3a, 0xf4, 0x0f, 0x1a, + 0x58, 0x50, 0xaf, 0xf3, 0xfd, 0xf1, 0xea, 0x29, 0x34, 0x73, 0x77, 0x22, 0xda, 0x90, 0x12, 0xf5, + 0x0c, 0x8f, 0xa9, 0x44, 0xa1, 0x8d, 0xab, 0xe4, 0x1f, 0x87, 0x4e, 0x7f, 0xaf, 0x81, 0x79, 0xe5, + 0xc4, 0xdd, 0x1b, 0xb7, 0xe2, 0x20, 0xcb, 0x7c, 0x30, 0x09, 0xab, 0x27, 0xc3, 0x9c, 0x7d, 0xdb, + 0x6d, 0x55, 0xb4, 0xfd, 0x17, 0xc7, 0x6d, 0x4b, 0x3b, 0x69, 0x5b, 0xda, 0xaf, 0xb6, 0xa5, 0x7d, + 0xea, 0x58, 0x85, 0x93, 0x8e, 0x55, 0xf8, 0xd1, 0xb1, 0x0a, 0x2f, 0x77, 0x03, 0x22, 0x6b, 0x8d, + 0xaa, 0xed, 0x73, 0x9a, 0x0f, 0x52, 0x48, 0xaa, 0xfe, 0x46, 0xc0, 0x61, 
0xbc, 0xb5, 0x09, 0x29, + 0x47, 0x8d, 0x3a, 0x16, 0xc9, 0x98, 0xcf, 0xc6, 0xfb, 0x46, 0x7f, 0xbc, 0xcb, 0xd7, 0x21, 0x16, + 0xd5, 0xb9, 0x74, 0xbe, 0xde, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x13, 0x14, 0xda, 0xc9, 0x0d, + 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // Gov tx to add a new rate limit + AddRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) + // Gov tx to update an existing rate limit + UpdateRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) + // Gov tx to remove a rate limit + RemoveRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) + // Gov tx to reset the flow on a rate limit + ResetRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) AddRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) { + out := new(MsgAddRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/AddRateLimit", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) { + out := new(MsgUpdateRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/UpdateRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RemoveRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) { + out := new(MsgRemoveRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/RemoveRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ResetRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) { + out := new(MsgResetRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/ResetRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // Gov tx to add a new rate limit + AddRateLimit(context.Context, *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) + // Gov tx to update an existing rate limit + UpdateRateLimit(context.Context, *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) + // Gov tx to remove a rate limit + RemoveRateLimit(context.Context, *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) + // Gov tx to reset the flow on a rate limit + ResetRateLimit(context.Context, *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) AddRateLimit(ctx context.Context, req *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddRateLimit not implemented") +} +func (*UnimplementedMsgServer) UpdateRateLimit(ctx context.Context, req *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateRateLimit not implemented") +} +func (*UnimplementedMsgServer) RemoveRateLimit(ctx context.Context, req *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveRateLimit not implemented") +} +func (*UnimplementedMsgServer) ResetRateLimit(ctx context.Context, req *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetRateLimit not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_AddRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgAddRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).AddRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/AddRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).AddRateLimit(ctx, req.(*MsgAddRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(MsgServer).UpdateRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/UpdateRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateRateLimit(ctx, req.(*MsgUpdateRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RemoveRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRemoveRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RemoveRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/RemoveRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RemoveRateLimit(ctx, req.(*MsgRemoveRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ResetRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgResetRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ResetRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/ResetRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ResetRateLimit(ctx, req.(*MsgResetRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +var Msg_serviceDesc = _Msg_serviceDesc +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibc.applications.rate_limiting.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddRateLimit", + Handler: _Msg_AddRateLimit_Handler, + }, + { + MethodName: 
"UpdateRateLimit", + Handler: _Msg_UpdateRateLimit_Handler, + }, + { + MethodName: "RemoveRateLimit", + Handler: _Msg_RemoveRateLimit_Handler, + }, + { + MethodName: "ResetRateLimit", + Handler: _Msg_ResetRateLimit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibc/applications/rate_limiting/v1/tx.proto", +} + +func (m *MsgAddRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgAddRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimitResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgResetRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgResetRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgAddRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n 
+= 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgAddRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgUpdateRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRemoveRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgRemoveRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgResetRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgResetRateLimitResponse) Size() (n int) 
{ + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgAddRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAddRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } 
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy 
< 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = 
preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/v2/ibc_middleware.go b/modules/apps/rate-limiting/v2/ibc_middleware.go new file mode 
100644 index 00000000000..8529811c283 --- /dev/null +++ b/modules/apps/rate-limiting/v2/ibc_middleware.go @@ -0,0 +1,121 @@ +package v2 + +import ( + "encoding/json" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + "github.com/cosmos/ibc-go/v10/modules/core/api" +) + +var _ api.IBCModule = (*IBCMiddleware)(nil) + +type IBCMiddleware struct { + app api.IBCModule + keeper keeper.Keeper +} + +func NewIBCMiddleware(k keeper.Keeper, app api.IBCModule) IBCMiddleware { + return IBCMiddleware{ + app: app, + keeper: k, + } +} + +func (im IBCMiddleware) OnSendPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, payload channeltypesv2.Payload, signer sdk.AccAddress) error { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate limiting OnSendPacket failed to convert v2 packet to v1 packet", "error", err) + return err + } + if err := im.keeper.SendRateLimitedPacket(ctx, packet.SourcePort, packet.SourceChannel, packet.TimeoutHeight, packet.TimeoutTimestamp, packet.Data); err != nil { + im.keeper.Logger(ctx).Error("ICS20 packet send was denied", "error", err) + return err + } + return im.app.OnSendPacket(ctx, sourceClient, destinationClient, sequence, payload, signer) +} + +func (im IBCMiddleware) OnRecvPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) channeltypesv2.RecvPacketResult { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + 
im.keeper.Logger(ctx).Error("ICS20 rate limiting OnRecvPacket failed to convert v2 packet to v1 packet", "error", err) + return channeltypesv2.RecvPacketResult{ + Status: channeltypesv2.PacketStatus_Failure, + Acknowledgement: channeltypes.NewErrorAcknowledgement(err).Acknowledgement(), + } + } + // Check if the packet would cause the rate limit to be exceeded, + // and if so, return an ack error + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("ICS20 packet receive was denied", "error", err) + return channeltypesv2.RecvPacketResult{ + Status: channeltypesv2.PacketStatus_Failure, + Acknowledgement: channeltypes.NewErrorAcknowledgement(err).Acknowledgement(), + } + } + + // If the packet was not rate-limited, pass it down to the Transfer OnRecvPacket callback + return im.app.OnRecvPacket(ctx, sourceClient, destinationClient, sequence, payload, relayer) +} + +func (im IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate limiting OnTimeoutPacket failed to convert v2 packet to v1 packet", "error", err) + return err + } + if err := im.keeper.TimeoutRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("ICS20 RateLimited OnTimeoutPacket failed", "error", err) + return err + } + return im.app.OnTimeoutPacket(ctx, sourceClient, destinationClient, sequence, payload, relayer) +} + +func (im IBCMiddleware) OnAcknowledgementPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, acknowledgement []byte, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate 
limiting OnAckPacket failed to convert v2 packet to v1 packet", "error", err) + return err + } + if err := im.keeper.AcknowledgeRateLimitedPacket(ctx, packet, acknowledgement); err != nil { + im.keeper.Logger(ctx).Error("ICS20 RateLimited OnAckPacket failed", "error", err) + return err + } + return im.app.OnAcknowledgementPacket(ctx, sourceClient, destinationClient, sequence, acknowledgement, payload, relayer) +} + +func v2ToV1Packet(payload channeltypesv2.Payload, sourceClient, destinationClient string, sequence uint64) (channeltypes.Packet, error) { + transferRepresentation, err := transfertypes.UnmarshalPacketData(payload.Value, payload.Version, payload.Encoding) + if err != nil { + return channeltypes.Packet{}, err + } + + packetData := transfertypes.FungibleTokenPacketData{ + Denom: transferRepresentation.Token.Denom.Path(), + Amount: transferRepresentation.Token.Amount, + Sender: transferRepresentation.Sender, + Receiver: transferRepresentation.Receiver, + Memo: transferRepresentation.Memo, + } + + packetDataBz, err := json.Marshal(packetData) + if err != nil { + return channeltypes.Packet{}, err + } + + return channeltypes.Packet{ + Sequence: sequence, + SourcePort: payload.SourcePort, + SourceChannel: sourceClient, + DestinationPort: payload.DestinationPort, + DestinationChannel: destinationClient, + Data: packetDataBz, + TimeoutHeight: clienttypes.Height{}, + TimeoutTimestamp: 0, + }, nil +} diff --git a/modules/apps/rate-limiting/v2/ibc_middleware_test.go b/modules/apps/rate-limiting/v2/ibc_middleware_test.go new file mode 100644 index 00000000000..cb5c3a7aeba --- /dev/null +++ b/modules/apps/rate-limiting/v2/ibc_middleware_test.go @@ -0,0 +1,138 @@ +package v2 + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" +) + +func TestV2ToV1Packet_WithJSONEncoding(t *testing.T) 
{ + payloadValue := transfertypes.FungibleTokenPacketData{ + Denom: "denom", + Amount: "100", + Sender: "sender", + Receiver: "receiver", + Memo: "memo", + } + payloadValueBz, err := transfertypes.MarshalPacketData(payloadValue, transfertypes.V1, transfertypes.EncodingJSON) + require.NoError(t, err) + + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingJSON, + Value: payloadValueBz, + } + + v1Packet, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.NoError(t, err) + require.Equal(t, uint64(1), v1Packet.Sequence) + require.Equal(t, payload.SourcePort, v1Packet.SourcePort) + require.Equal(t, "sourceClient", v1Packet.SourceChannel) + require.Equal(t, payload.DestinationPort, v1Packet.DestinationPort) + require.Equal(t, "destinationClient", v1Packet.DestinationChannel) + + var v1PacketData transfertypes.FungibleTokenPacketData + err = json.Unmarshal(v1Packet.Data, &v1PacketData) + require.NoError(t, err) + require.Equal(t, payloadValue, v1PacketData) +} + +func TestV2ToV1Packet_WithABIEncoding(t *testing.T) { + payloadValue := transfertypes.FungibleTokenPacketData{ + Denom: "denom", + Amount: "100", + Sender: "sender", + Receiver: "receiver", + Memo: "memo", + } + + payloadValueBz, err := transfertypes.MarshalPacketData(payloadValue, transfertypes.V1, transfertypes.EncodingABI) + require.NoError(t, err) + + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingABI, + Value: payloadValueBz, + } + + v1Packet, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.NoError(t, err) + require.Equal(t, uint64(1), v1Packet.Sequence) + require.Equal(t, payload.SourcePort, v1Packet.SourcePort) + require.Equal(t, "sourceClient", v1Packet.SourceChannel) + require.Equal(t, payload.DestinationPort, 
v1Packet.DestinationPort) + require.Equal(t, "destinationClient", v1Packet.DestinationChannel) + + var v1PacketData transfertypes.FungibleTokenPacketData + err = json.Unmarshal(v1Packet.Data, &v1PacketData) + require.NoError(t, err) + require.Equal(t, payloadValue, v1PacketData) +} + +func TestV2ToV1Packet_WithProtobufEncoding(t *testing.T) { + payloadValue := transfertypes.FungibleTokenPacketData{ + Denom: "denom", + Amount: "100", + Sender: "sender", + Receiver: "receiver", + Memo: "memo", + } + + payloadValueBz, err := transfertypes.MarshalPacketData(payloadValue, transfertypes.V1, transfertypes.EncodingProtobuf) + require.NoError(t, err) + + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingProtobuf, + Value: payloadValueBz, + } + + v1Packet, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.NoError(t, err) + require.Equal(t, uint64(1), v1Packet.Sequence) + require.Equal(t, payload.SourcePort, v1Packet.SourcePort) + require.Equal(t, "sourceClient", v1Packet.SourceChannel) + require.Equal(t, payload.DestinationPort, v1Packet.DestinationPort) + require.Equal(t, "destinationClient", v1Packet.DestinationChannel) + + var v1PacketData transfertypes.FungibleTokenPacketData + err = json.Unmarshal(v1Packet.Data, &v1PacketData) + require.NoError(t, err) + require.Equal(t, payloadValue, v1PacketData) +} + +func TestV2ToV1Packet_WithNilPayload(t *testing.T) { + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingABI, + Value: nil, + } + + _, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.Error(t, err) +} + +func TestV2ToV1Packet_WithEmptyPayload(t *testing.T) { + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, 
+ Encoding: transfertypes.EncodingABI, + Value: []byte{}, + } + + _, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.Error(t, err) +} diff --git a/modules/apps/transfer/v2/alias_test.go b/modules/apps/transfer/v2/alias_test.go index eb67e381778..6e8c53f43ef 100644 --- a/modules/apps/transfer/v2/alias_test.go +++ b/modules/apps/transfer/v2/alias_test.go @@ -28,8 +28,8 @@ func (suite *TransferTestSuite) TestAliasedTransferChannel() { path.Setup() // mock v1 format for both sides of the channel - mockV1Format(path.EndpointA) - mockV1Format(path.EndpointB) + suite.mockV1Format(path.EndpointA) + suite.mockV1Format(path.EndpointB) // migrate the store for both chains err := v11.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), suite.chainA.App.GetIBCKeeper()) @@ -166,8 +166,8 @@ func (suite *TransferTestSuite) TestDifferentAppPostAlias() { path.Setup() // mock v1 format for both sides of the channel - mockV1Format(path.EndpointA) - mockV1Format(path.EndpointB) + suite.mockV1Format(path.EndpointA) + suite.mockV1Format(path.EndpointB) // migrate the store for both chains err := v11.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), suite.chainA.App.GetIBCKeeper()) @@ -259,7 +259,7 @@ func (suite *TransferTestSuite) assertReceiverEqual(chain *ibctesting.TestChain, suite.Require().Equal(expectedAmount, amount.Amount, "receiver balance should match expected amount") } -func mockV1Format(endpoint *ibctesting.Endpoint) { +func (suite *TransferTestSuite) mockV1Format(endpoint *ibctesting.Endpoint) { // mock v1 format by setting the sequence in the old key seq, ok := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID) if !ok { @@ -270,6 +270,8 @@ func 
mockV1Format(endpoint *ibctesting.Endpoint) { // so we can migrate it in our tests storeService := runtime.NewKVStoreService(endpoint.Chain.GetSimApp().GetKey(ibcexported.StoreKey)) store := storeService.OpenKVStore(endpoint.Chain.GetContext()) - store.Set(v11.NextSequenceSendV1Key(endpoint.ChannelConfig.PortID, endpoint.ChannelID), sdk.Uint64ToBigEndian(seq)) - store.Delete(hostv2.NextSequenceSendKey(endpoint.ChannelID)) + err := store.Set(v11.NextSequenceSendV1Key(endpoint.ChannelConfig.PortID, endpoint.ChannelID), sdk.Uint64ToBigEndian(seq)) + suite.Require().NoError(err) + err = store.Delete(hostv2.NextSequenceSendKey(endpoint.ChannelID)) + suite.Require().NoError(err) } diff --git a/modules/core/04-channel/migrations/v11/store_test.go b/modules/core/04-channel/migrations/v11/store_test.go index dc386179421..f91985181d1 100644 --- a/modules/core/04-channel/migrations/v11/store_test.go +++ b/modules/core/04-channel/migrations/v11/store_test.go @@ -60,15 +60,18 @@ func (suite *MigrationsV11TestSuite) TestMigrateStore() { // to mock channels that were created before the new changes seq, ok := ibcKeeper.ChannelKeeper.GetNextSequenceSend(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) suite.Require().True(ok) - store.Delete(hostv2.NextSequenceSendKey(path.EndpointA.ChannelID)) - store.Set(v11.NextSequenceSendV1Key(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), sdk.Uint64ToBigEndian(seq)) + err := store.Delete(hostv2.NextSequenceSendKey(path.EndpointA.ChannelID)) + suite.Require().NoError(err) + err = store.Set(v11.NextSequenceSendV1Key(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), sdk.Uint64ToBigEndian(seq)) + suite.Require().NoError(err) // Remove counterparty to mock pre migration channels clientStore := ibcKeeper.ClientKeeper.ClientStore(ctx, path.EndpointA.ChannelID) clientStore.Delete(clientv2types.CounterpartyKey()) // Remove alias to mock pre migration channels - 
store.Delete(channelv2types.AliasKey(path.EndpointA.ChannelID)) + err = store.Delete(channelv2types.AliasKey(path.EndpointA.ChannelID)) + suite.Require().NoError(err) if i%5 == 0 { channel, ok := ibcKeeper.ChannelKeeper.GetChannel(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) @@ -86,40 +89,40 @@ func (suite *MigrationsV11TestSuite) TestMigrateStore() { suite.Require().NoError(err) for i := range numberOfChannels { - channelId := types.FormatChannelIdentifier(uint64(i)) - channel, ok := ibcKeeper.ChannelKeeper.GetChannel(ctx, mock.PortID, channelId) + channelID := types.FormatChannelIdentifier(uint64(i)) + channel, ok := ibcKeeper.ChannelKeeper.GetChannel(ctx, mock.PortID, channelID) suite.Require().True(ok, i) if channel.Ordering == types.UNORDERED && channel.State == types.OPEN { // ensure counterparty set - expCounterparty, ok := ibcKeeper.ChannelKeeper.GetV2Counterparty(ctx, mock.PortID, channelId) + expCounterparty, ok := ibcKeeper.ChannelKeeper.GetV2Counterparty(ctx, mock.PortID, channelID) suite.Require().True(ok) - counterparty, ok := ibcKeeper.ClientV2Keeper.GetClientCounterparty(ctx, channelId) + counterparty, ok := ibcKeeper.ClientV2Keeper.GetClientCounterparty(ctx, channelID) suite.Require().True(ok) suite.Require().Equal(expCounterparty, counterparty, "counterparty not set correctly") // ensure base client mapping set - baseClientId, ok := ibcKeeper.ChannelKeeperV2.GetClientForAlias(ctx, channelId) + baseClientID, ok := ibcKeeper.ChannelKeeperV2.GetClientForAlias(ctx, channelID) suite.Require().True(ok) - suite.Require().NotEqual(channelId, baseClientId) + suite.Require().NotEqual(channelID, baseClientID) connection, ok := ibcKeeper.ConnectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) suite.Require().True(ok) - suite.Require().Equal(connection.ClientId, baseClientId, "base client mapping not set correctly") + suite.Require().Equal(connection.ClientId, baseClientID, "base client mapping not set correctly") } else { // 
ensure counterparty not set for closed channels - _, ok := ibcKeeper.ClientV2Keeper.GetClientCounterparty(ctx, channelId) + _, ok := ibcKeeper.ClientV2Keeper.GetClientCounterparty(ctx, channelID) suite.Require().False(ok, "counterparty should not be set for closed channels") // ensure base client mapping not set for closed channels - baseClientId, ok := ibcKeeper.ChannelKeeperV2.GetClientForAlias(ctx, channelId) + baseClientID, ok := ibcKeeper.ChannelKeeperV2.GetClientForAlias(ctx, channelID) suite.Require().False(ok) - suite.Require().Equal("", baseClientId, "base client mapping should not be set for closed channels") + suite.Require().Equal("", baseClientID, "base client mapping should not be set for closed channels") } // ensure that sequence migrated correctly - bz, _ := store.Get(v11.NextSequenceSendV1Key(mock.PortID, channelId)) + bz, _ := store.Get(v11.NextSequenceSendV1Key(mock.PortID, channelID)) suite.Require().Nil(bz) - seq, ok := ibcKeeper.ChannelKeeper.GetNextSequenceSend(ctx, mock.PortID, channelId) + seq, ok := ibcKeeper.ChannelKeeper.GetNextSequenceSend(ctx, mock.PortID, channelID) suite.Require().True(ok) suite.Require().Equal(uint64(1), seq) diff --git a/modules/core/04-channel/types/errors.go b/modules/core/04-channel/types/errors.go index 9e13edef03c..add5f35f2a5 100644 --- a/modules/core/04-channel/types/errors.go +++ b/modules/core/04-channel/types/errors.go @@ -59,4 +59,5 @@ var ( ErrPruningSequenceStartNotFound = errorsmod.Register(SubModuleName, 41, "pruning sequence start not found") ErrRecvStartSequenceNotFound = errorsmod.Register(SubModuleName, 42, "recv start sequence not found") ErrInvalidCommitment = errorsmod.Register(SubModuleName, 43, "invalid commitment") + ErrKeeperNotSet = errorsmod.Register(SubModuleName, 44, "keeper not set") ) diff --git a/modules/core/04-channel/v2/keeper/packet.go b/modules/core/04-channel/v2/keeper/packet.go index e40ee7e6174..a59b8020c7f 100644 --- a/modules/core/04-channel/v2/keeper/packet.go +++ 
b/modules/core/04-channel/v2/keeper/packet.go @@ -57,26 +57,26 @@ func (k *Keeper) sendPacket( } // Before we do client keeper level checks, we first get underlying base clientID - clientId := packet.SourceClient - if underlyingClientId, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { - clientId = underlyingClientId + clientID := packet.SourceClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { + clientID = underlyingClientID } // check that the client of counterparty chain is still active - if status := k.ClientKeeper.GetClientStatus(ctx, clientId); status != exported.Active { - return 0, "", errorsmod.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientId, status) + if status := k.ClientKeeper.GetClientStatus(ctx, clientID); status != exported.Active { + return 0, "", errorsmod.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status) } // retrieve latest height and timestamp of the client of counterparty chain - latestHeight := k.ClientKeeper.GetClientLatestHeight(ctx, clientId) + latestHeight := k.ClientKeeper.GetClientLatestHeight(ctx, clientID) if latestHeight.IsZero() { - return 0, "", errorsmod.Wrapf(clienttypes.ErrInvalidHeight, "cannot send packet using client (%s) with zero height", clientId) + return 0, "", errorsmod.Wrapf(clienttypes.ErrInvalidHeight, "cannot send packet using client (%s) with zero height", clientID) } // client timestamps are in nanoseconds while packet timeouts are in seconds // thus to compare them, we convert the client timestamp to seconds in uint64 // to be consistent with IBC V2 specified timeout behaviour - latestTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, clientId, latestHeight) + latestTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, clientID, latestHeight) if err != nil { return 0, "", err } @@ -146,21 +146,21 @@ func (k *Keeper) recvPacket( commitment := 
types.CommitPacket(packet) // Before we do client keeper level checks, we first get underlying base clientID - clientId := packet.DestinationClient - if underlyingClientId, isAlias := k.GetClientForAlias(ctx, packet.DestinationClient); isAlias { - clientId = underlyingClientId + clientID := packet.DestinationClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.DestinationClient); isAlias { + clientID = underlyingClientID } if err := k.ClientKeeper.VerifyMembership( ctx, - clientId, + clientID, proofHeight, 0, 0, proof, merklePath, commitment, ); err != nil { - return errorsmod.Wrapf(err, "failed packet commitment verification for client (%s)", clientId) + return errorsmod.Wrapf(err, "failed packet commitment verification for client (%s)", clientID) } // Set Packet Receipt to prevent timeout from occurring on counterparty @@ -275,21 +275,21 @@ func (k *Keeper) acknowledgePacket(ctx sdk.Context, packet types.Packet, acknowl merklePath := types.BuildMerklePath(counterparty.MerklePrefix, path) // Before we do client keeper level checks, we first get underlying base clientID - clientId := packet.SourceClient - if underlyingClientId, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { - clientId = underlyingClientId + clientID := packet.SourceClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { + clientID = underlyingClientID } if err := k.ClientKeeper.VerifyMembership( ctx, - clientId, + clientID, proofHeight, 0, 0, proof, merklePath, types.CommitAcknowledgement(acknowledgement), ); err != nil { - return errorsmod.Wrapf(err, "failed packet acknowledgement verification for client (%s)", clientId) + return errorsmod.Wrapf(err, "failed packet acknowledgement verification for client (%s)", clientID) } k.DeletePacketCommitment(ctx, packet.SourceClient, packet.Sequence) @@ -327,16 +327,16 @@ func (k *Keeper) timeoutPacket( } // Before we do client keeper level checks, we first get underlying 
base clientID - clientId := packet.SourceClient - if underlyingClientId, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { - clientId = underlyingClientId + clientID := packet.SourceClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { + clientID = underlyingClientID } // check that timeout timestamp has passed on the other end // client timestamps are in nanoseconds while packet timeouts are in seconds // so we convert client timestamp to seconds in uint64 to be consistent // with IBC V2 timeout behaviour - proofTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, clientId, proofHeight) + proofTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, clientID, proofHeight) if err != nil { return err } @@ -368,13 +368,13 @@ func (k *Keeper) timeoutPacket( if err := k.ClientKeeper.VerifyNonMembership( ctx, - clientId, + clientID, proofHeight, 0, 0, proof, merklePath, ); err != nil { - return errorsmod.Wrapf(err, "failed packet receipt absence verification for client (%s)", clientId) + return errorsmod.Wrapf(err, "failed packet receipt absence verification for client (%s)", clientID) } // delete packet commitment to prevent replay diff --git a/modules/core/04-channel/v2/keeper/packet_test.go b/modules/core/04-channel/v2/keeper/packet_test.go index 274876da3c8..a2996ba9591 100644 --- a/modules/core/04-channel/v2/keeper/packet_test.go +++ b/modules/core/04-channel/v2/keeper/packet_test.go @@ -613,8 +613,8 @@ func (suite *KeeperTestSuite) TestAliasedChannel() { path.Setup() // mock v1 format for both sides of the channel - mockV1Format(path.EndpointA) - mockV1Format(path.EndpointB) + suite.mockV1Format(path.EndpointA) + suite.mockV1Format(path.EndpointB) // migrate the store for both chains err := v11.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), 
suite.chainA.App.GetIBCKeeper()) @@ -758,7 +758,7 @@ func (suite *KeeperTestSuite) TestPostMigrationAliasing() { suite.Require().NoError(err, "timeout v2 packet failed") } -func mockV1Format(endpoint *ibctesting.Endpoint) { +func (suite *KeeperTestSuite) mockV1Format(endpoint *ibctesting.Endpoint) { // mock v1 format by setting the sequence in the old key seq, ok := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID) if !ok { @@ -769,8 +769,10 @@ func mockV1Format(endpoint *ibctesting.Endpoint) { // so we can migrate it in our tests storeService := runtime.NewKVStoreService(endpoint.Chain.GetSimApp().GetKey(ibcexported.StoreKey)) store := storeService.OpenKVStore(endpoint.Chain.GetContext()) - store.Set(v11.NextSequenceSendV1Key(endpoint.ChannelConfig.PortID, endpoint.ChannelID), sdk.Uint64ToBigEndian(seq)) - store.Delete(hostv2.NextSequenceSendKey(endpoint.ChannelID)) + err := store.Set(v11.NextSequenceSendV1Key(endpoint.ChannelConfig.PortID, endpoint.ChannelID), sdk.Uint64ToBigEndian(seq)) + suite.Require().NoError(err) + err = store.Delete(hostv2.NextSequenceSendKey(endpoint.ChannelID)) + suite.Require().NoError(err) // Remove counterparty to mock pre migration channels clientStore := endpoint.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(endpoint.Chain.GetContext(), endpoint.ChannelID) diff --git a/proto/ibc/applications/rate_limiting/v1/genesis.proto b/proto/ibc/applications/rate_limiting/v1/genesis.proto new file mode 100644 index 00000000000..6e734b4d4c1 --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/genesis.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "gogoproto/gogo.proto"; +import "ibc/applications/rate_limiting/v1/rate_limiting.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// GenesisState defines the ratelimit module's genesis state. 
+message GenesisState { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; + repeated WhitelistedAddressPair whitelisted_address_pairs = 2 [(gogoproto.nullable) = false]; + repeated string blacklisted_denoms = 3; + repeated string pending_send_packet_sequence_numbers = 4; + HourEpoch hour_epoch = 5 [(gogoproto.nullable) = false]; +} diff --git a/proto/ibc/applications/rate_limiting/v1/query.proto b/proto/ibc/applications/rate_limiting/v1/query.proto new file mode 100644 index 00000000000..6d9b594159d --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/query.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "ibc/applications/rate_limiting/v1/rate_limiting.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// Query defines the gRPC querier service. +service Query { + // Queries all rate limits + rpc AllRateLimits(QueryAllRateLimitsRequest) returns (QueryAllRateLimitsResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimits"; + } + + // Queries a specific rate limit by channel ID and denom + // Ex: + // - /ratelimit/{channel_or_client_id}/by_denom?denom={denom} + rpc RateLimit(QueryRateLimitRequest) returns (QueryRateLimitResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/" + "ratelimit/{channel_or_client_id}/by_denom"; + } + + // Queries all the rate limits for a given chain + rpc RateLimitsByChainID(QueryRateLimitsByChainIDRequest) returns (QueryRateLimitsByChainIDResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/ratelimits/{chain_id}"; + } + + // Queries all the rate limits for a given channel ID + rpc RateLimitsByChannelOrClientID(QueryRateLimitsByChannelOrClientIDRequest) + returns (QueryRateLimitsByChannelOrClientIDResponse) { + option (google.api.http).get = 
"/ibc/apps/rate-limiting/v1/ratelimit/ratelimits/{channel_or_client_id}"; + } + + // Queries all blacklisted denoms + rpc AllBlacklistedDenoms(QueryAllBlacklistedDenomsRequest) returns (QueryAllBlacklistedDenomsResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/blacklisted_denoms"; + } + + // Queries all whitelisted address pairs + rpc AllWhitelistedAddresses(QueryAllWhitelistedAddressesRequest) returns (QueryAllWhitelistedAddressesResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/whitelisted_addresses"; + } +} + +// Queries all rate limits +message QueryAllRateLimitsRequest {} + +// QueryAllRateLimitsResponse returns all the rate limits stored on the chain. +message QueryAllRateLimitsResponse { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; +} + +// Queries a specific rate limit by channel ID and denom +message QueryRateLimitRequest { + string denom = 1; + string channel_or_client_id = 2; +} + +// QueryRateLimitResponse returns a rate limit by denom and channel_or_client_id combination. +message QueryRateLimitResponse { + RateLimit rate_limit = 1; +} + +// Queries all the rate limits for a given chain +message QueryRateLimitsByChainIDRequest { + string chain_id = 1; +} + +// QueryRateLimitsByChainIDResponse returns all rate-limits by a chain. +message QueryRateLimitsByChainIDResponse { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; +} + +// Queries all the rate limits for a given channel or client ID +message QueryRateLimitsByChannelOrClientIDRequest { + string channel_or_client_id = 1; +} + +// QueryRateLimitsByChannelOrClientIDResponse returns all rate-limits by a channel or client id. 
+message QueryRateLimitsByChannelOrClientIDResponse { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; +} + +// Queries all blacklisted denoms +message QueryAllBlacklistedDenomsRequest {} + +// QueryAllBlacklistedDenomsResponse returns all the blacklisted denosm. +message QueryAllBlacklistedDenomsResponse { + repeated string denoms = 1; +} + +// Queries all whitelisted address pairs +message QueryAllWhitelistedAddressesRequest {} + +// QueryAllWhitelistedAddressesResponse returns all whitelisted pairs. +message QueryAllWhitelistedAddressesResponse { + repeated WhitelistedAddressPair address_pairs = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/ibc/applications/rate_limiting/v1/rate_limiting.proto b/proto/ibc/applications/rate_limiting/v1/rate_limiting.proto new file mode 100644 index 00000000000..81fb84e5e3e --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/rate_limiting.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// PacketDirection defines whether the transfer packet is being sent from +// this chain or is being received on this chain +enum PacketDirection { + option (gogoproto.goproto_enum_prefix) = false; + + PACKET_SEND = 0; + PACKET_RECV = 1; +} + +// Path holds the denom and channelID that define the rate limited route +message Path { + string denom = 1; + string channel_or_client_id = 2; +} + +// Quota defines the rate limit thresholds for transfer packets +message Quota { + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 
10 indicates 10%) + string max_percent_send = 1 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_recv = 2 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + uint64 duration_hours = 3; +} + +// Flow tracks all the inflows and outflows of a channel. +message Flow { + // Inflow defines the total amount of inbound transfers for the given + // rate limit in the current window + string inflow = 1 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // Outflow defines the total amount of outbound transfers for the given + // rate limit in the current window + string outflow = 2 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // ChannelValue stores the total supply of the denom at the start of + // the rate limit. This is used as the denominator when checking + // the rate limit threshold + // The ChannelValue is fixed for the duration of the rate limit window + string channel_value = 3 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; +} + +// RateLimit stores all the context about a given rate limit, including +// the relevant denom and channel, rate limit thresholds, and current +// progress towards the limits +message RateLimit { + Path path = 1; + Quota quota = 2; + Flow flow = 3; +} + +// WhitelistedAddressPair represents a sender-receiver combo that is +// not subject to rate limit restrictions +message WhitelistedAddressPair { + string sender = 1; + string receiver = 2; +} + +// HourEpoch is the epoch type. 
+message HourEpoch { + uint64 epoch_number = 1; + google.protobuf.Duration duration = 2 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.jsontag) = "duration,omitempty"]; + google.protobuf.Timestamp epoch_start_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + int64 epoch_start_height = 4; +} diff --git a/proto/ibc/applications/rate_limiting/v1/tx.proto b/proto/ibc/applications/rate_limiting/v1/tx.proto new file mode 100644 index 00000000000..9c5b6c25deb --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/tx.proto @@ -0,0 +1,109 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "amino/amino.proto"; +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// Msg service for rate limit txs +service Msg { + option (cosmos.msg.v1.service) = true; + + // Gov tx to add a new rate limit + rpc AddRateLimit(MsgAddRateLimit) returns (MsgAddRateLimitResponse); + // Gov tx to update an existing rate limit + rpc UpdateRateLimit(MsgUpdateRateLimit) returns (MsgUpdateRateLimitResponse); + // Gov tx to remove a rate limit + rpc RemoveRateLimit(MsgRemoveRateLimit) returns (MsgRemoveRateLimitResponse); + // Gov tx to reset the flow on a rate limit + rpc ResetRateLimit(MsgResetRateLimit) returns (MsgResetRateLimitResponse); +} + +// Gov tx to add a new rate limit +message MsgAddRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgAddRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate 
limited chain + string channel_or_client_id = 3; + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_send = 4 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_recv = 5 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + uint64 duration_hours = 6; +} + +// MsgAddRateLimitResponse is the return type for AddRateLimit function. +message MsgAddRateLimitResponse {} + +// Gov tx to update an existing rate limit +message MsgUpdateRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgUpdateRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_send = 4 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_recv = 5 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 
24 indicates that the rate limit is reset each day) + uint64 duration_hours = 6; +} + +// MsgUpdateRateLimitResponse is the return type for UpdateRateLimit. +message MsgUpdateRateLimitResponse {} + +// Gov tx to remove a rate limit +message MsgRemoveRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgRemoveRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; +} + +// MsgRemoveRateLimitResponse is the response type for RemoveRateLimit +message MsgRemoveRateLimitResponse {} + +// Gov tx to reset the flow on a rate limit +message MsgResetRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgResetRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; +} + +// MsgResetRateLimitResponse is the response type for ResetRateLimit. 
+message MsgResetRateLimitResponse {} diff --git a/simapp/app.go b/simapp/app.go index d1e8dd99ba7..59a0e2d9ed6 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -109,6 +109,9 @@ import ( packetforward "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware" packetforwardkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" packetforwardtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" "github.com/cosmos/ibc-go/v10/modules/apps/transfer" ibctransferkeeper "github.com/cosmos/ibc-go/v10/modules/apps/transfer/keeper" ibctransfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" @@ -177,6 +180,7 @@ type SimApp struct { ConsensusParamsKeeper consensusparamkeeper.Keeper CircuitKeeper circuitkeeper.Keeper PFMKeeper *packetforwardkeeper.Keeper + RateLimitKeeper ratelimitkeeper.Keeper // the module manager ModuleManager *module.Manager @@ -221,6 +225,7 @@ func NewSimApp( appCodec := codec.NewProtoCodec(interfaceRegistry) legacyAmino := codec.NewLegacyAmino() txConfig := authtx.NewTxConfig(appCodec, authtx.DefaultSignModes) + govAuthority := authtypes.NewModuleAddress(govtypes.ModuleName).String() std.RegisterLegacyAminoCodec(legacyAmino) std.RegisterInterfaces(interfaceRegistry) @@ -262,7 +267,7 @@ func NewSimApp( minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, govtypes.StoreKey, group.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, - authzkeeper.StoreKey, consensusparamtypes.StoreKey, circuittypes.StoreKey, packetforwardtypes.StoreKey, + authzkeeper.StoreKey, consensusparamtypes.StoreKey, 
circuittypes.StoreKey, packetforwardtypes.StoreKey, ratelimittypes.StoreKey, ) // register streaming services @@ -280,36 +285,36 @@ func NewSimApp( } // set the BaseApp's parameter store - app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), runtime.EventService{}) + app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), govAuthority, runtime.EventService{}) bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore) // SDK module keepers // add keepers - app.AccountKeeper = authkeeper.NewAccountKeeper(appCodec, runtime.NewKVStoreService(keys[authtypes.StoreKey]), authtypes.ProtoBaseAccount, maccPerms, authcodec.NewBech32Codec(sdk.Bech32MainPrefix), sdk.Bech32MainPrefix, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.AccountKeeper = authkeeper.NewAccountKeeper(appCodec, runtime.NewKVStoreService(keys[authtypes.StoreKey]), authtypes.ProtoBaseAccount, maccPerms, authcodec.NewBech32Codec(sdk.Bech32MainPrefix), sdk.Bech32MainPrefix, govAuthority) app.BankKeeper = bankkeeper.NewBaseKeeper( appCodec, runtime.NewKVStoreService(keys[banktypes.StoreKey]), app.AccountKeeper, BlockedAddresses(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), + govAuthority, logger, ) app.StakingKeeper = stakingkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[stakingtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr), authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr), + appCodec, runtime.NewKVStoreService(keys[stakingtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, govAuthority, authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr), authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr), ) - app.MintKeeper = mintkeeper.NewKeeper(appCodec, 
runtime.NewKVStoreService(keys[minttypes.StoreKey]), app.StakingKeeper, app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.MintKeeper = mintkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[minttypes.StoreKey]), app.StakingKeeper, app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, govAuthority) - app.DistrKeeper = distrkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[distrtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.DistrKeeper = distrkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[distrtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName, govAuthority) app.SlashingKeeper = slashingkeeper.NewKeeper( - appCodec, legacyAmino, runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), app.StakingKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, legacyAmino, runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), app.StakingKeeper, govAuthority, ) invCheckPeriod := cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)) app.CrisisKeeper = crisiskeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[crisistypes.StoreKey]), invCheckPeriod, - app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec()) + app.BankKeeper, authtypes.FeeCollectorName, govAuthority, app.AccountKeeper.AddressCodec()) app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[feegrant.StoreKey]), app.AccountKeeper) @@ -319,7 +324,7 @@ func NewSimApp( stakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks()), ) - app.CircuitKeeper = circuitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[circuittypes.StoreKey]), 
authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec()) + app.CircuitKeeper = circuitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[circuittypes.StoreKey]), govAuthority, app.AccountKeeper.AddressCodec()) app.SetCircuitBreaker(&app.CircuitKeeper) app.AuthzKeeper = authzkeeper.NewKeeper(runtime.NewKVStoreService(keys[authzkeeper.StoreKey]), appCodec, app.MsgServiceRouter(), app.AccountKeeper) @@ -338,10 +343,10 @@ func NewSimApp( } homePath := cast.ToString(appOpts.Get(flags.FlagHome)) // set the governance module account as the authority for conducting upgrades - app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, govAuthority) app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.UpgradeKeeper, govAuthority, ) govConfig := govtypes.DefaultConfig() @@ -351,7 +356,7 @@ func NewSimApp( */ govKeeper := govkeeper.NewKeeper( appCodec, runtime.NewKVStoreService(keys[govtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, - app.StakingKeeper, app.DistrKeeper, app.MsgServiceRouter(), govConfig, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + app.StakingKeeper, app.DistrKeeper, app.MsgServiceRouter(), govConfig, govAuthority, ) app.GovKeeper = *govKeeper.SetHooks( @@ -366,7 +371,7 @@ func NewSimApp( app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), + govAuthority, ) // ICA Host keeper @@ -375,44 +380,42 @@ func NewSimApp( 
app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.AccountKeeper, app.MsgServiceRouter(), app.GRPCQueryRouter(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), + govAuthority, ) - // Packet Forward Middleware keeper - app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), app.TransferKeeper, app.IBCKeeper.ChannelKeeper, app.BankKeeper, app.ICAControllerKeeper.GetICS4Wrapper(), authtypes.NewModuleAddress(govtypes.ModuleName).String()) - // Create IBC Router ibcRouter := porttypes.NewRouter() // Middleware Stacks + app.RateLimitKeeper = ratelimitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[ratelimittypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ClientKeeper, app.BankKeeper, govAuthority) + // PacketForwardMiddleware must be created before TransferKeeper - app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), nil, app.IBCKeeper.ChannelKeeper, app.BankKeeper, app.ICAControllerKeeper.GetICS4Wrapper(), authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), nil, app.IBCKeeper.ChannelKeeper, app.BankKeeper, app.RateLimitKeeper.ICS4Wrapper(), govAuthority) // Create Transfer Keeper - app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), - app.IBCKeeper.ChannelKeeper, - app.IBCKeeper.ChannelKeeper, - app.MsgServiceRouter(), - app.AccountKeeper, app.BankKeeper, - authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) + app.TransferKeeper = ibctransferkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), app.AccountKeeper, app.BankKeeper, govAuthority) 
app.PFMKeeper.SetTransferKeeper(app.TransferKeeper) // Create Transfer Stack // SendPacket, since it is originating from the application to core IBC: - // transferKeeper.SendPacket -> channel.SendPacket + // transferKeeper.SendPacket -> Pf.SendPacket -> RateLim.SendPacket -> channel.SendPacket // RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way - // channel.RecvPacket -> transfer.OnRecvPacket + // channel.RecvPacket -> RateLim.OnRecvPacket -> Pf.OnRecvPacket -> transfer.OnRecvPacket // transfer stack contains (from top to bottom): + // - RateLimit + // - PacketForward // - Transfer // create IBC module from bottom to top of stack - transferStack := packetforward.NewIBCMiddleware(transfer.NewIBCModule(app.TransferKeeper), app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp) + pfmMiddleware := packetforward.NewIBCMiddleware(transfer.NewIBCModule(app.TransferKeeper), app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp) + transferStack := ratelimiting.NewIBCMiddleware(pfmMiddleware, app.RateLimitKeeper, app.IBCKeeper.ChannelKeeper) + + app.PFMKeeper.SetICS4Wrapper(transferStack) + app.RateLimitKeeper.SetICS4Wrapper(app.IBCKeeper.ChannelKeeper) app.TransferKeeper.WithICS4Wrapper(app.PFMKeeper) // Add transfer stack to IBC Router @@ -489,6 +492,7 @@ func NewSimApp( transfer.NewAppModule(app.TransferKeeper), ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), packetforward.NewAppModule(app.PFMKeeper), + ratelimiting.NewAppModule(app.RateLimitKeeper), // IBC light clients ibctm.NewAppModule(tmLightClientModule), @@ -530,6 +534,7 @@ func NewSimApp( genutiltypes.ModuleName, authz.ModuleName, icatypes.ModuleName, + ratelimittypes.ModuleName, ) app.ModuleManager.SetOrderEndBlockers( crisistypes.ModuleName, @@ -542,6 +547,7 @@ func NewSimApp( feegrant.ModuleName, icatypes.ModuleName, group.ModuleName, + ratelimittypes.ModuleName, ) // NOTE: The genutils 
module must occur after staking so that pools are @@ -553,7 +559,7 @@ func NewSimApp( slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName, ibcexported.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, packetforwardtypes.ModuleName, icatypes.ModuleName, feegrant.ModuleName, upgradetypes.ModuleName, - vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, + vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, ratelimittypes.ModuleName, } app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...) app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...) diff --git a/simapp/go.mod b/simapp/go.mod index 2e8184a101e..e87545a3ac1 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -39,7 +39,7 @@ require ( cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect cloud.google.com/go/storage v1.49.0 // indirect - cosmossdk.io/collections v1.3.0 // indirect + cosmossdk.io/collections v1.3.1 // indirect cosmossdk.io/depinject v1.2.1 // indirect cosmossdk.io/errors v1.0.2 // indirect cosmossdk.io/math v1.5.3 // indirect @@ -97,7 +97,7 @@ require ( github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.32.0 // indirect + github.com/getsentry/sentry-go v0.33.0 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect diff --git a/simapp/go.sum b/simapp/go.sum index 81d0407c8fd..3e2a2abe076 100644 --- a/simapp/go.sum +++ b/simapp/go.sum @@ -618,8 +618,8 @@ cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= cosmossdk.io/client/v2 v2.0.0-beta.9 
h1:xc06zg4G858/pK5plhf8RCfo+KR2mdDKJNrEkfrVAqc= cosmossdk.io/client/v2 v2.0.0-beta.9/go.mod h1:pHf3CCHX5gmbL9rDCVbXhGI2+/DdAVTEZSLpdd5V9Zs= -cosmossdk.io/collections v1.3.0 h1:RUY23xXBy/bu5oSHZ5y+mkJRyA4ZboKDO4Yvx4+g2uc= -cosmossdk.io/collections v1.3.0/go.mod h1:cqVpBMDGEYhuNmNSXIOmqpnQ7Eav43hpJIetzLuEkns= +cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= +cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= @@ -923,8 +923,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= -github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= +github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= diff --git a/testing/coordinator.go b/testing/coordinator.go index 301c2f4f8d1..20c7900eb29 100644 --- a/testing/coordinator.go +++ b/testing/coordinator.go @@ -65,6 +65,13 @@ func (coord *Coordinator) IncrementTimeBy(increment time.Duration) { coord.UpdateTime() } +// SetTime sets the 
coordinator's current time to the specified time and updates +// the proposed header time for all chains. +func (coord *Coordinator) SetTime(t time.Time) { + coord.CurrentTime = t.UTC() + coord.UpdateTime() +} + // UpdateTime updates all clocks for the TestChains to the current global time. func (coord *Coordinator) UpdateTime() { for _, chain := range coord.Chains { diff --git a/testing/coordinator_test.go b/testing/coordinator_test.go new file mode 100644 index 00000000000..4337feef485 --- /dev/null +++ b/testing/coordinator_test.go @@ -0,0 +1,38 @@ +package ibctesting_test + +import ( + "fmt" + "testing" + + testifysuite "github.com/stretchr/testify/suite" + + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +type CoordinatorTestSuite struct { + testifysuite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +func (s *CoordinatorTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + +func TestCoordinatorTestSuite(t *testing.T) { + testifysuite.Run(t, new(CoordinatorTestSuite)) +} + +func (s *CoordinatorTestSuite) TestChainCodecRootResolveNotSet() { + resolved, err := s.chainA.Codec.InterfaceRegistry().Resolve("/") + s.Require().Error(err, fmt.Sprintf("Root typeUrl should not be resolvable: %T", resolved)) + + resolved, err = s.chainB.Codec.InterfaceRegistry().Resolve("/") + s.Require().Error(err, fmt.Sprintf("Root typeUrl should not be resolvable: %T", resolved)) +} diff --git a/testing/simapp/app.go b/testing/simapp/app.go index 0799e699532..2de87708e59 100644 --- a/testing/simapp/app.go +++ b/testing/simapp/app.go @@ -90,6 +90,9 @@ import ( packetforward "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware" packetforwardkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" packetforwardtypes 
"github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" transfer "github.com/cosmos/ibc-go/v10/modules/apps/transfer" ibctransferkeeper "github.com/cosmos/ibc-go/v10/modules/apps/transfer/keeper" ibctransfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" @@ -156,6 +159,7 @@ type SimApp struct { TransferKeeper ibctransferkeeper.Keeper ConsensusParamsKeeper consensusparamkeeper.Keeper PFMKeeper *packetforwardkeeper.Keeper + RateLimitKeeper ratelimitkeeper.Keeper // make IBC modules public for test purposes // these modules are never directly routed to by the IBC Router @@ -250,6 +254,7 @@ func NewSimApp( govtypes.StoreKey, group.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, packetforwardtypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, authzkeeper.StoreKey, consensusparamtypes.StoreKey, + ratelimittypes.StoreKey, ) // register streaming services @@ -358,18 +363,11 @@ func NewSimApp( // Middleware Stacks + app.RateLimitKeeper = ratelimitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[ratelimittypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ClientKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String()) // PacketForwardMiddleware must be created before TransferKeeper - app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), nil, app.IBCKeeper.ChannelKeeper, app.BankKeeper, app.ICAControllerKeeper.GetICS4Wrapper(), authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), nil, 
app.IBCKeeper.ChannelKeeper, app.BankKeeper, app.IBCKeeper.ChannelKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String()) - // Create Transfer Keeper - app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), - app.IBCKeeper.ChannelKeeper, - app.IBCKeeper.ChannelKeeper, - app.MsgServiceRouter(), - app.AccountKeeper, app.BankKeeper, - authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) + app.TransferKeeper = ibctransferkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String()) app.PFMKeeper.SetTransferKeeper(app.TransferKeeper) @@ -400,11 +398,23 @@ func NewSimApp( // channel.RecvPacket -> transfer.OnRecvPacket // create IBC module from bottom to top of stack - transferStack := packetforward.NewIBCMiddleware(transfer.NewIBCModule(app.TransferKeeper), app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp) + // - Core + // - Rate Limit + // - Packet Forward Middleware + // - Transfer + transferStack := transfer.NewIBCModule(app.TransferKeeper) + transferIBCModule := packetforward.NewIBCMiddleware(transferStack, app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp) + + // Create the rate-limiting middleware, wrapping the transfer IBC module + // The ICS4Wrapper is the IBC ChannelKeeper + rateLimitMiddleware := ratelimiting.NewIBCMiddleware(transferIBCModule, app.RateLimitKeeper, app.IBCKeeper.ChannelKeeper) + + app.PFMKeeper.SetICS4Wrapper(rateLimitMiddleware) + app.RateLimitKeeper.SetICS4Wrapper(app.IBCKeeper.ChannelKeeper) app.TransferKeeper.WithICS4Wrapper(app.PFMKeeper) - // Add transfer stack to IBC Router - ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) + // Add transfer stack with rate-limiting middleware 
to IBC Router + ibcRouter.AddRoute(ibctransfertypes.ModuleName, rateLimitMiddleware) // Create Interchain Accounts Stack // SendPacket, since it is originating from the application to core IBC: @@ -480,6 +490,7 @@ func NewSimApp( // IBC modules ibc.NewAppModule(app.IBCKeeper), transfer.NewAppModule(app.TransferKeeper), + ratelimiting.NewAppModule(app.RateLimitKeeper), // Use correct package alias ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), mockModule, packetforward.NewAppModule(app.PFMKeeper), @@ -523,6 +534,7 @@ func NewSimApp( genutiltypes.ModuleName, authz.ModuleName, icatypes.ModuleName, + ratelimittypes.ModuleName, ibcmock.ModuleName, ) app.ModuleManager.SetOrderEndBlockers( @@ -544,7 +556,7 @@ func NewSimApp( authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, - ibcexported.ModuleName, genutiltypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, + ibcexported.ModuleName, genutiltypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, ratelimittypes.ModuleName, packetforwardtypes.ModuleName, icatypes.ModuleName, ibcmock.ModuleName, upgradetypes.ModuleName, vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, }