diff --git a/.gitignore b/.gitignore index 6a2470a2..9fe843ca 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,11 @@ jepsen/.lein-* jepsen/.nrepl-port .m2/ jepsen/store/ + +# Jepsen local SSH keys (generated locally; never commit) +jepsen/docker/id_rsa +jepsen/.ssh/ + +# Build and lint cache directories +.cache/ +.golangci-cache/ diff --git a/Dockerfile b/Dockerfile index 429364b9..836fa997 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM golang:latest AS build WORKDIR $GOPATH/src/app COPY . . -RUN CGO_ENABLED=0 go build -o /app main.go +RUN CGO_ENABLED=0 go build -o /app . FROM gcr.io/distroless/static:latest COPY --from=build /app /app diff --git a/adapter/dynamodb.go b/adapter/dynamodb.go index 857701ab..94220ccd 100644 --- a/adapter/dynamodb.go +++ b/adapter/dynamodb.go @@ -32,7 +32,7 @@ type DynamoDBServer struct { httpServer *http.Server } -func NewDynamoDBServer(listen net.Listener, st store.MVCCStore, coordinate *kv.Coordinate) *DynamoDBServer { +func NewDynamoDBServer(listen net.Listener, st store.MVCCStore, coordinate kv.Coordinator) *DynamoDBServer { d := &DynamoDBServer{ listen: listen, store: st, @@ -85,7 +85,7 @@ func (d *DynamoDBServer) putItem(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - if _, err = d.coordinator.Dispatch(reqs); err != nil { + if _, err = d.coordinator.Dispatch(r.Context(), reqs); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -195,7 +195,7 @@ func (d *DynamoDBServer) updateItem(w http.ResponseWriter, r *http.Request) { IsTxn: false, Elems: []*kv.Elem[kv.OP]{elem}, } - if _, err = d.coordinator.Dispatch(req); err != nil { + if _, err = d.coordinator.Dispatch(r.Context(), req); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -214,7 +214,7 @@ func (d *DynamoDBServer) transactWriteItems(w http.ResponseWriter, r *http.Reque http.Error(w, err.Error(), http.StatusBadRequest) return } - if _, err = d.coordinator.Dispatch(reqs); err != nil { + if _, err = d.coordinator.Dispatch(r.Context(), reqs); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/adapter/grpc.go b/adapter/grpc.go index b4cbc7cb..aa74f8fa 100644 --- a/adapter/grpc.go +++ b/adapter/grpc.go @@ -4,6 +4,7 @@ import ( "context" "log/slog" "os" + "sync" "github.com/bootjp/elastickv/internal" "github.com/bootjp/elastickv/kv" @@ -11,8 +12,6 @@ import ( "github.com/bootjp/elastickv/store" "github.com/cockroachdb/errors" "github.com/spaolacci/murmur3" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" ) var _ pb.RawKVServer = (*GRPCServer)(nil) @@ -24,12 +23,24 @@ type GRPCServer struct { coordinator kv.Coordinator store store.MVCCStore + closeStore bool + closeOnce sync.Once + closeErr error + pb.UnimplementedRawKVServer pb.UnimplementedTransactionalKVServer } -func NewGRPCServer(store store.MVCCStore, coordinate *kv.Coordinate) *GRPCServer { - return &GRPCServer{ +type GRPCServerOption func(*GRPCServer) + +func WithCloseStore() GRPCServerOption { + return func(s *GRPCServer) { + s.closeStore = true + } +} + +func NewGRPCServer(store store.MVCCStore, coordinate kv.Coordinator, opts ...GRPCServerOption) *GRPCServer { + s := &GRPCServer{ log: slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ Level: slog.LevelWarn, })), @@ -37,83 +48,126 @@ func NewGRPCServer(store store.MVCCStore, coordinate *kv.Coordinate) *GRPCServer coordinator: coordinate, store: store, } + for _, opt := range opts 
{ + if opt == nil { + continue + } + opt(s) + } + return s } -func (r GRPCServer) RawGet(ctx context.Context, req *pb.RawGetRequest) (*pb.RawGetResponse, error) { - readTS := req.GetTs() - if readTS == 0 { - readTS = snapshotTS(r.coordinator.Clock(), r.store) +func (r *GRPCServer) Close() error { + if r == nil { + return nil } - - if r.coordinator.IsLeader() { - v, err := r.store.GetAt(ctx, req.Key, readTS) - if err != nil { - switch { - case errors.Is(err, store.ErrKeyNotFound): - return &pb.RawGetResponse{ - Value: nil, - }, nil - default: - return nil, errors.WithStack(err) - } + r.closeOnce.Do(func() { + if !r.closeStore || r.store == nil { + return + } + if err := r.store.Close(); err != nil { + r.closeErr = errors.WithStack(err) } - r.log.InfoContext(ctx, "Get", - slog.String("key", string(req.Key)), - slog.String("value", string(v))) + }) + return r.closeErr +} + +func (r *GRPCServer) clock() *kv.HLC { + if r == nil || r.coordinator == nil { + return nil + } + return r.coordinator.Clock() +} - return &pb.RawGetResponse{ - Value: v, - }, nil +func (r *GRPCServer) RawGet(ctx context.Context, req *pb.RawGetRequest) (*pb.RawGetResponse, error) { + readTS := req.GetTs() + if readTS == 0 { + readTS = snapshotTS(r.clock(), r.store) } - v, err := r.tryLeaderGet(req.Key) + v, err := r.store.GetAt(ctx, req.Key, readTS) + if errors.Is(err, store.ErrKeyNotFound) { + return &pb.RawGetResponse{Value: nil}, nil + } if err != nil { - return &pb.RawGetResponse{ - Value: nil, - }, err + return nil, errors.WithStack(err) } r.log.InfoContext(ctx, "Get", slog.String("key", string(req.Key)), slog.String("value", string(v))) - return &pb.RawGetResponse{ - Value: v, - }, nil + return &pb.RawGetResponse{Value: v}, nil } -func (r GRPCServer) tryLeaderGet(key []byte) ([]byte, error) { - addr := r.coordinator.RaftLeader() - if addr == "" { - return nil, ErrLeaderNotFound +func (r *GRPCServer) RawLatestCommitTS(ctx context.Context, req *pb.RawLatestCommitTSRequest) (*pb.RawLatestCommitTSResponse, error) { + key := req.GetKey() + if len(key) == 0 { + return nil, errors.WithStack(kv.ErrInvalidRequest) } - conn, err := grpc.NewClient(string(addr), - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - ) + ts, exists, err := r.store.LatestCommitTS(ctx, key) if err != nil { return nil, errors.WithStack(err) } - defer conn.Close() + return &pb.RawLatestCommitTSResponse{ + Ts: ts, + Exists: exists, + }, nil +} - cli := pb.NewRawKVClient(conn) - ts := snapshotTS(r.coordinator.Clock(), r.store) - resp, err := cli.RawGet(context.Background(), &pb.RawGetRequest{Key: key, Ts: ts}) +func (r *GRPCServer) RawScanAt(ctx context.Context, req *pb.RawScanAtRequest) (*pb.RawScanAtResponse, error) { + limit64 := req.GetLimit() + limit, err := rawScanLimit(limit64) if err != nil { - return nil, errors.WithStack(err) + return &pb.RawScanAtResponse{Kv: nil}, err } - return resp.Value, nil + readTS := req.GetTs() + if readTS == 0 { + readTS = snapshotTS(r.clock(), r.store) + } + + res, err := r.store.ScanAt(ctx, req.StartKey, req.EndKey, limit, readTS) + if err != nil { + return &pb.RawScanAtResponse{Kv: nil}, errors.WithStack(err) + } + + return &pb.RawScanAtResponse{Kv: rawKvPairs(res)}, nil +} + +func rawScanLimit(limit64 int64) (int, error) { + if limit64 < 0 { + return 0, errors.WithStack(kv.ErrInvalidRequest) + } + maxInt64 := int64(^uint(0) >> 1) + if limit64 > maxInt64 { + return 0, errors.WithStack(internal.ErrIntOverflow) + } + return int(limit64), nil +} + +func 
rawKvPairs(res []*store.KVPair) []*pb.RawKVPair { + out := make([]*pb.RawKVPair, 0, len(res)) + for _, kvp := range res { + if kvp == nil { + continue + } + out = append(out, &pb.RawKVPair{ + Key: kvp.Key, + Value: kvp.Value, + }) + } + return out } -func (r GRPCServer) RawPut(_ context.Context, req *pb.RawPutRequest) (*pb.RawPutResponse, error) { +func (r *GRPCServer) RawPut(ctx context.Context, req *pb.RawPutRequest) (*pb.RawPutResponse, error) { m, err := r.grpcTranscoder.RawPutToRequest(req) if err != nil { return nil, errors.WithStack(err) } - res, err := r.coordinator.Dispatch(m) + res, err := r.coordinator.Dispatch(ctx, m) if err != nil { return &pb.RawPutResponse{ CommitIndex: uint64(0), @@ -127,13 +181,13 @@ func (r GRPCServer) RawPut(_ context.Context, req *pb.RawPutRequest) (*pb.RawPut }, nil } -func (r GRPCServer) RawDelete(ctx context.Context, req *pb.RawDeleteRequest) (*pb.RawDeleteResponse, error) { +func (r *GRPCServer) RawDelete(ctx context.Context, req *pb.RawDeleteRequest) (*pb.RawDeleteResponse, error) { m, err := r.grpcTranscoder.RawDeleteToRequest(req) if err != nil { return nil, errors.WithStack(err) } - res, err := r.coordinator.Dispatch(m) + res, err := r.coordinator.Dispatch(ctx, m) if err != nil { return &pb.RawDeleteResponse{ CommitIndex: uint64(0), @@ -147,19 +201,19 @@ func (r GRPCServer) RawDelete(ctx context.Context, req *pb.RawDeleteRequest) (*p }, nil } -func (r GRPCServer) PreWrite(ctx context.Context, req *pb.PreWriteRequest) (*pb.PreCommitResponse, error) { +func (r *GRPCServer) PreWrite(ctx context.Context, req *pb.PreWriteRequest) (*pb.PreCommitResponse, error) { return nil, kv.ErrNotImplemented } -func (r GRPCServer) Commit(ctx context.Context, req *pb.CommitRequest) (*pb.CommitResponse, error) { +func (r *GRPCServer) Commit(ctx context.Context, req *pb.CommitRequest) (*pb.CommitResponse, error) { return nil, kv.ErrNotImplemented } -func (r GRPCServer) Rollback(ctx context.Context, req *pb.RollbackRequest) (*pb.RollbackResponse, error) { +func (r *GRPCServer) Rollback(ctx context.Context, req *pb.RollbackRequest) (*pb.RollbackResponse, error) { return nil, kv.ErrNotImplemented } -func (r GRPCServer) Put(ctx context.Context, req *pb.PutRequest) (*pb.PutResponse, error) { +func (r *GRPCServer) Put(ctx context.Context, req *pb.PutRequest) (*pb.PutResponse, error) { reqs, err := r.grpcTranscoder.TransactionalPutToRequests(req) if err != nil { return nil, errors.WithStack(err) @@ -167,7 +221,7 @@ func (r GRPCServer) Put(ctx context.Context, req *pb.PutRequest) (*pb.PutRespons r.log.InfoContext(ctx, "Put", slog.Any("reqs", reqs)) - res, err := r.coordinator.Dispatch(reqs) + res, err := r.coordinator.Dispatch(ctx, reqs) if err != nil { return &pb.PutResponse{ CommitIndex: uint64(0), @@ -180,13 +234,13 @@ func (r GRPCServer) Put(ctx context.Context, req *pb.PutRequest) (*pb.PutRespons }, nil } -func (r GRPCServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) { +func (r *GRPCServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) { h := murmur3.New64() if _, err := h.Write(req.Key); err != nil { return nil, errors.WithStack(err) } - readTS := snapshotTS(r.coordinator.Clock(), r.store) + readTS := snapshotTS(r.clock(), r.store) v, err := r.store.GetAt(ctx, req.Key, readTS) if err != nil { switch { @@ -206,7 +260,7 @@ func (r GRPCServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetRespons }, nil } -func (r GRPCServer) Delete(ctx context.Context, req *pb.DeleteRequest) (*pb.DeleteResponse, error) { +func (r 
*GRPCServer) Delete(ctx context.Context, req *pb.DeleteRequest) (*pb.DeleteResponse, error) { reqs, err := r.grpcTranscoder.TransactionalDeleteToRequests(req) if err != nil { return nil, errors.WithStack(err) @@ -214,7 +268,7 @@ func (r GRPCServer) Delete(ctx context.Context, req *pb.DeleteRequest) (*pb.Dele r.log.InfoContext(ctx, "Delete", slog.Any("reqs", reqs)) - res, err := r.coordinator.Dispatch(reqs) + res, err := r.coordinator.Dispatch(ctx, reqs) if err != nil { return &pb.DeleteResponse{ CommitIndex: uint64(0), @@ -227,14 +281,14 @@ func (r GRPCServer) Delete(ctx context.Context, req *pb.DeleteRequest) (*pb.Dele }, nil } -func (r GRPCServer) Scan(ctx context.Context, req *pb.ScanRequest) (*pb.ScanResponse, error) { +func (r *GRPCServer) Scan(ctx context.Context, req *pb.ScanRequest) (*pb.ScanResponse, error) { limit, err := internal.Uint64ToInt(req.Limit) if err != nil { return &pb.ScanResponse{ Kv: nil, }, errors.WithStack(err) } - readTS := snapshotTS(r.coordinator.Clock(), r.store) + readTS := snapshotTS(r.clock(), r.store) res, err := r.store.ScanAt(ctx, req.StartKey, req.EndKey, limit, readTS) if err != nil { return &pb.ScanResponse{ diff --git a/adapter/redis.go b/adapter/redis.go index ff233e83..dd85b496 100644 --- a/adapter/redis.go +++ b/adapter/redis.go @@ -3,12 +3,12 @@ package adapter import ( "bytes" "context" - "fmt" "math" "net" "sort" "strconv" "strings" + "time" "github.com/bootjp/elastickv/kv" pb "github.com/bootjp/elastickv/proto" @@ -21,19 +21,39 @@ import ( "google.golang.org/grpc/credentials/insecure" ) +const ( + cmdGet = "GET" + cmdSet = "SET" + cmdDel = "DEL" + cmdExists = "EXISTS" + cmdPing = "PING" + cmdKeys = "KEYS" + cmdMulti = "MULTI" + cmdExec = "EXEC" + cmdDiscard = "DISCARD" + cmdLRange = "LRANGE" + cmdRPush = "RPUSH" + minKeyedArgs = 2 +) + +const ( + redisLatestCommitTimeout = 5 * time.Second + redisDispatchTimeout = 10 * time.Second +) + //nolint:mnd var argsLen = map[string]int{ - "GET": 2, - "SET": 3, - "DEL": 2, - "EXISTS": 2, - "PING": 1, - "KEYS": 2, - "MULTI": 1, - "EXEC": 1, - "DISCARD": 1, - "LRANGE": 4, - "RPUSH": -3, // negative means minimum number of args + cmdGet: 2, + cmdSet: 3, + cmdDel: 2, + cmdExists: 2, + cmdPing: 1, + cmdKeys: 2, + cmdMulti: 1, + cmdExec: 1, + cmdDiscard: 1, + cmdLRange: 4, + cmdRPush: -3, // negative means minimum number of args } type RedisServer struct { @@ -72,7 +92,7 @@ type redisResult struct { err error } -func NewRedisServer(listen net.Listener, store store.MVCCStore, coordinate *kv.Coordinate, leaderRedis map[raft.ServerAddress]string) *RedisServer { +func NewRedisServer(listen net.Listener, store store.MVCCStore, coordinate kv.Coordinator, leaderRedis map[raft.ServerAddress]string) *RedisServer { r := &RedisServer{ listen: listen, store: store, @@ -82,17 +102,17 @@ func NewRedisServer(listen net.Listener, store store.MVCCStore, coordinate *kv.C } r.route = map[string]func(conn redcon.Conn, cmd redcon.Command){ - "PING": r.ping, - "SET": r.set, - "GET": r.get, - "DEL": r.del, - "EXISTS": r.exists, - "KEYS": r.keys, - "MULTI": r.multi, - "EXEC": r.exec, - "DISCARD": r.discard, - "RPUSH": r.rpush, - "LRANGE": r.lrange, + cmdPing: r.ping, + cmdSet: r.set, + cmdGet: r.get, + cmdDel: r.del, + cmdExists: r.exists, + cmdKeys: r.keys, + cmdMulti: r.multi, + cmdExec: r.exec, + cmdDiscard: r.discard, + cmdRPush: r.rpush, + cmdLRange: r.lrange, } return r @@ -129,7 +149,7 @@ func (r *RedisServer) Run() error { } name := strings.ToUpper(string(cmd.Args[0])) - if state.inTxn && name != "EXEC" && name != "DISCARD" 
&& name != "MULTI" { + if state.inTxn && name != cmdExec && name != cmdDiscard && name != cmdMulti { state.queue = append(state.queue, cmd) conn.WriteString("QUEUED") return @@ -193,7 +213,9 @@ func (r *RedisServer) set(conn redcon.Conn, cmd redcon.Command) { return } - _, err = r.coordinator.Dispatch(res) + ctx, cancel := context.WithTimeout(context.Background(), redisDispatchTimeout) + defer cancel() + _, err = r.coordinator.Dispatch(ctx, res) if err != nil { conn.WriteError(err.Error()) return @@ -245,7 +267,9 @@ func (r *RedisServer) del(conn redcon.Conn, cmd redcon.Command) { return } - _, err = r.coordinator.Dispatch(res) + ctx, cancel := context.WithTimeout(context.Background(), redisDispatchTimeout) + defer cancel() + _, err = r.coordinator.Dispatch(ctx, res) if err != nil { conn.WriteError(err.Error()) return @@ -255,7 +279,7 @@ func (r *RedisServer) del(conn redcon.Conn, cmd redcon.Command) { } func (r *RedisServer) exists(conn redcon.Conn, cmd redcon.Command) { - if !r.coordinator.IsLeader() { + if !r.coordinator.IsLeaderForKey(cmd.Args[1]) { res, err := r.proxyExists(cmd.Args[1]) if err != nil { conn.WriteError(err.Error()) @@ -265,7 +289,7 @@ func (r *RedisServer) exists(conn redcon.Conn, cmd redcon.Command) { return } - if err := r.coordinator.VerifyLeader(); err != nil { + if err := r.coordinator.VerifyLeaderForKey(cmd.Args[1]); err != nil { conn.WriteError(err.Error()) return } @@ -434,15 +458,6 @@ func (r *RedisServer) exec(conn redcon.Conn, _ redcon.Command) { return } - if !r.coordinator.IsLeader() { - if err := r.proxyExec(conn, state.queue); err != nil { - conn.WriteError(err.Error()) - } - state.inTxn = false - state.queue = nil - return - } - results, err := r.runTransaction(state.queue) state.inTxn = false state.queue = nil @@ -517,17 +532,17 @@ func (t *txnContext) listLength(st *listTxnState) int64 { func (t *txnContext) apply(cmd redcon.Command) (redisResult, error) { switch strings.ToUpper(string(cmd.Args[0])) { - case "SET": + case cmdSet: return t.applySet(cmd) - case "DEL": + case cmdDel: return t.applyDel(cmd) - case "GET": + case cmdGet: return t.applyGet(cmd) - case "EXISTS": + case cmdExists: return t.applyExists(cmd) - case "RPUSH": + case cmdRPush: return t.applyRPush(cmd) - case "LRANGE": + case cmdLRange: return t.applyLRange(cmd) default: return redisResult{}, errors.WithStack(errors.Newf("ERR unsupported command '%s'", cmd.Args[0])) @@ -696,7 +711,9 @@ func (t *txnContext) commit() error { } group := &kv.OperationGroup[kv.OP]{IsTxn: true, Elems: elems, StartTS: t.startTS} - if _, err := t.server.coordinator.Dispatch(group); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), redisDispatchTimeout) + defer cancel() + if _, err := t.server.coordinator.Dispatch(ctx, group); err != nil { return errors.WithStack(err) } return nil @@ -770,13 +787,9 @@ func (t *txnContext) buildListElems() ([]*kv.Elem[kv.OP], error) { } func (r *RedisServer) runTransaction(queue []redcon.Command) ([]redisResult, error) { - if err := r.coordinator.VerifyLeader(); err != nil { - return nil, errors.WithStack(err) - } - - startTS := r.coordinator.Clock().Next() - if last := r.store.LastCommitTS(); last > startTS { - startTS = last + startTS, err := r.txnStartTS(queue) + if err != nil { + return nil, err } ctx := &txnContext{ @@ -802,52 +815,52 @@ func (r *RedisServer) runTransaction(queue []redcon.Command) ([]redisResult, err return results, nil } -func (r *RedisServer) proxyExec(conn redcon.Conn, queue []redcon.Command) error { - leader := 
r.coordinator.RaftLeader() - if leader == "" { - return ErrLeaderNotFound +func (r *RedisServer) txnStartTS(queue []redcon.Command) (uint64, error) { + ctx, cancel := context.WithTimeout(context.Background(), redisLatestCommitTimeout) + defer cancel() + + maxTS, err := r.maxLatestCommitTS(ctx, queue) + if err != nil { + return 0, err } - leaderAddr, ok := r.leaderRedis[leader] - if !ok || leaderAddr == "" { - return errors.WithStack(errors.Newf("leader redis address unknown for %s", leader)) + if r.coordinator != nil && r.coordinator.Clock() != nil && maxTS > 0 { + r.coordinator.Clock().Observe(maxTS) } + if r.coordinator == nil || r.coordinator.Clock() == nil { + return maxTS + 1, nil + } + return r.coordinator.Clock().Next(), nil +} - cli := redis.NewClient(&redis.Options{Addr: leaderAddr}) - defer func() { _ = cli.Close() }() - - ctx := context.Background() - cmds := make([]redis.Cmder, len(queue)) - names := make([]string, len(queue)) - _, err := cli.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - for i, c := range queue { - name := strings.ToUpper(string(c.Args[0])) - names[i] = name - args := make([]string, 0, len(c.Args)-1) - for _, a := range c.Args[1:] { - args = append(args, string(a)) - } - cmd := newProxyCmd(name, args, ctx) - _ = pipe.Process(ctx, cmd) - cmds[i] = cmd - } - return nil - }) - if err != nil { - return errors.WithStack(err) +func (r *RedisServer) maxLatestCommitTS(ctx context.Context, queue []redcon.Command) (uint64, error) { + if r.store == nil { + return 0, nil } - results := make([]redisResult, 0, len(cmds)) - for i, cmd := range cmds { - res, err := buildProxyResult(names[i], cmd) - if err != nil { - results = append(results, redisResult{typ: resultError, err: err}) + // NOTE: This currently calls LatestCommitTS for each (unique) key involved in + // the transaction. kv.MaxLatestCommitTS deduplicates keys and performs the + // lookups in parallel, but very large transactions can still make this a + // latency hot path. If needed, add batching/caching at the storage layer. + const txnLatestCommitKeysPerCmd = 2 + keys := make([][]byte, 0, len(queue)*txnLatestCommitKeysPerCmd) + for _, cmd := range queue { + if len(cmd.Args) < minKeyedArgs { continue } - results = append(results, res) + name := strings.ToUpper(string(cmd.Args[0])) + switch name { + case cmdSet, cmdGet, cmdDel, cmdExists, cmdRPush, cmdLRange: + key := cmd.Args[1] + keys = append(keys, key) + // Also account for list metadata keys to avoid stale typing decisions. 
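+ // Hedged illustration (key layout assumed from listMetaKey below): for
+ // RPUSH mylist a this collects both "mylist" and listMetaKey("mylist"),
+ // so commits that touched only list metadata still advance the start TS.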
+ keys = append(keys, listMetaKey(key)) + } } - - r.writeResults(conn, results) - return nil + ts, err := kv.MaxLatestCommitTS(ctx, r.store, keys) + if err != nil { + return 0, errors.WithStack(err) + } + return ts, nil } func (r *RedisServer) writeResults(conn redcon.Conn, results []redisResult) { @@ -875,53 +888,6 @@ func (r *RedisServer) writeResults(conn redcon.Conn, results []redisResult) { } } -// --- list helpers ---------------------------------------------------- -func buildProxyResult(_ string, cmd redis.Cmder) (redisResult, error) { - switch c := cmd.(type) { - case *redis.StatusCmd: - s, err := c.Result() - return redisResult{typ: resultString, str: s}, errors.WithStack(err) - case *redis.IntCmd: - i, err := c.Result() - return redisResult{typ: resultInt, integer: i}, errors.WithStack(err) - case *redis.StringCmd: - b, err := c.Bytes() - if errors.Is(err, redis.Nil) { - return redisResult{typ: resultNil}, nil - } - return redisResult{typ: resultBulk, bulk: b}, errors.WithStack(err) - case *redis.StringSliceCmd: - arr, err := c.Result() - return redisResult{typ: resultArray, arr: arr}, errors.WithStack(err) - case *redis.Cmd: - v, err := c.Result() - return redisResult{typ: resultString, str: fmt.Sprint(v)}, errors.WithStack(err) - default: - return redisResult{typ: resultError, err: errors.Newf("unsupported command result type %T", cmd)}, nil - } -} - -func newProxyCmd(name string, args []string, ctx context.Context) redis.Cmder { - argv := make([]any, 0, len(args)+1) - argv = append(argv, name) - for _, a := range args { - argv = append(argv, a) - } - - switch name { - case "SET": - return redis.NewStatusCmd(ctx, argv...) - case "DEL", "EXISTS", "RPUSH": - return redis.NewIntCmd(ctx, argv...) - case "GET": - return redis.NewStringCmd(ctx, argv...) - case "LRANGE": - return redis.NewStringSliceCmd(ctx, argv...) - default: - return redis.NewCmd(ctx, argv...) 
- } -} - func listMetaKey(userKey []byte) []byte { return store.ListMetaKey(userKey) } @@ -1018,7 +984,7 @@ func (r *RedisServer) listRPush(ctx context.Context, key []byte, values [][]byte } group := &kv.OperationGroup[kv.OP]{IsTxn: true, Elems: ops} - if _, err := r.coordinator.Dispatch(group); err != nil { + if _, err := r.coordinator.Dispatch(ctx, group); err != nil { return 0, errors.WithStack(err) } return newMeta.Len, nil @@ -1053,7 +1019,7 @@ func (r *RedisServer) deleteList(ctx context.Context, key []byte) error { _ = meta group := &kv.OperationGroup[kv.OP]{IsTxn: true, Elems: ops} - _, err = r.coordinator.Dispatch(group) + _, err = r.coordinator.Dispatch(ctx, group) return errors.WithStack(err) } @@ -1082,11 +1048,11 @@ func (r *RedisServer) fetchListRange(ctx context.Context, key []byte, meta store func (r *RedisServer) rangeList(key []byte, startRaw, endRaw []byte) ([]string, error) { readTS := r.readTS() - if !r.coordinator.IsLeader() { + if !r.coordinator.IsLeaderForKey(key) { return r.proxyLRange(key, startRaw, endRaw) } - if err := r.coordinator.VerifyLeader(); err != nil { + if err := r.coordinator.VerifyLeaderForKey(key); err != nil { return nil, errors.WithStack(err) } @@ -1116,7 +1082,7 @@ func (r *RedisServer) rangeList(key []byte, startRaw, endRaw []byte) ([]string, } func (r *RedisServer) proxyLRange(key []byte, startRaw, endRaw []byte) ([]string, error) { - leader := r.coordinator.RaftLeader() + leader := r.coordinator.RaftLeaderForKey(key) if leader == "" { return nil, ErrLeaderNotFound } @@ -1142,7 +1108,7 @@ func (r *RedisServer) proxyLRange(key []byte, startRaw, endRaw []byte) ([]string } func (r *RedisServer) proxyRPush(key []byte, values [][]byte) (int64, error) { - leader := r.coordinator.RaftLeader() + leader := r.coordinator.RaftLeaderForKey(key) if leader == "" { return 0, ErrLeaderNotFound } @@ -1171,7 +1137,7 @@ func parseInt(b []byte) (int, error) { // tryLeaderGet proxies a GET to the current Raft leader, returning the value and // whether the proxy succeeded. 
func (r *RedisServer) tryLeaderGetAt(key []byte, ts uint64) ([]byte, error) { - addr := r.coordinator.RaftLeader() + addr := r.coordinator.RaftLeaderForKey(key) if addr == "" { return nil, ErrLeaderNotFound } @@ -1195,8 +1161,8 @@ func (r *RedisServer) tryLeaderGetAt(key []byte, ts uint64) ([]byte, error) { } func (r *RedisServer) readValueAt(key []byte, readTS uint64) ([]byte, error) { - if r.coordinator.IsLeader() { - if err := r.coordinator.VerifyLeader(); err != nil { + if r.coordinator.IsLeaderForKey(key) { + if err := r.coordinator.VerifyLeaderForKey(key); err != nil { return nil, errors.WithStack(err) } v, err := r.store.GetAt(context.Background(), key, readTS) @@ -1210,7 +1176,7 @@ func (r *RedisServer) rpush(conn redcon.Conn, cmd redcon.Command) { var length int64 var err error - if r.coordinator.IsLeader() { + if r.coordinator.IsLeaderForKey(cmd.Args[1]) { length, err = r.listRPush(ctx, cmd.Args[1], cmd.Args[2:]) } else { length, err = r.proxyRPush(cmd.Args[1], cmd.Args[2:]) diff --git a/adapter/redis_proxy.go b/adapter/redis_proxy.go index dfed0125..a5df7d4c 100644 --- a/adapter/redis_proxy.go +++ b/adapter/redis_proxy.go @@ -8,7 +8,7 @@ import ( ) func (r *RedisServer) proxyExists(key []byte) (int, error) { - leader := r.coordinator.RaftLeader() + leader := r.coordinator.RaftLeaderForKey(key) if leader == "" { return 0, ErrLeaderNotFound } diff --git a/adapter/test_util.go b/adapter/test_util.go index 6d189875..7a5ddfe0 100644 --- a/adapter/test_util.go +++ b/adapter/test_util.go @@ -29,6 +29,11 @@ import ( func shutdown(nodes []Node) { for _, n := range nodes { n.grpcServer.Stop() + if n.grpcService != nil { + if err := n.grpcService.Close(); err != nil { + log.Printf("grpc service close: %v", err) + } + } n.redisServer.Stop() if n.dynamoServer != nil { n.dynamoServer.Stop() @@ -104,19 +109,21 @@ type Node struct { redisAddress string dynamoAddress string grpcServer *grpc.Server + grpcService *GRPCServer redisServer *RedisServer dynamoServer *DynamoDBServer raft *raft.Raft tm *transport.Manager } -func newNode(grpcAddress, raftAddress, redisAddress, dynamoAddress string, r *raft.Raft, tm *transport.Manager, grpcs *grpc.Server, rd *RedisServer, ds *DynamoDBServer) Node { +func newNode(grpcAddress, raftAddress, redisAddress, dynamoAddress string, r *raft.Raft, tm *transport.Manager, grpcs *grpc.Server, grpcService *GRPCServer, rd *RedisServer, ds *DynamoDBServer) Node { return Node{ grpcAddress: grpcAddress, raftAddress: raftAddress, redisAddress: redisAddress, dynamoAddress: dynamoAddress, grpcServer: grpcs, + grpcService: grpcService, redisServer: rd, dynamoServer: ds, raft: r, @@ -346,7 +353,8 @@ func setupNodes(t *testing.T, ctx context.Context, n int, ports []portsAdress) ( s := grpc.NewServer() trx := kv.NewTransaction(r) coordinator := kv.NewCoordinator(trx, r) - gs := NewGRPCServer(st, coordinator) + routedStore := kv.NewLeaderRoutedStore(st, coordinator) + gs := NewGRPCServer(routedStore, coordinator, WithCloseStore()) tm.Register(s) pb.RegisterRawKVServer(s, gs) pb.RegisterTransactionalKVServer(s, gs) @@ -379,6 +387,7 @@ func setupNodes(t *testing.T, ctx context.Context, n int, ports []portsAdress) ( r, tm, s, + gs, rd, ds, )) diff --git a/cmd/server/demo.go b/cmd/server/demo.go index 5c59c908..9cdae1d3 100644 --- a/cmd/server/demo.go +++ b/cmd/server/demo.go @@ -228,7 +228,8 @@ func setupStorage(dir string) (raft.LogStore, raft.StableStore, raft.SnapshotSto func setupGRPC(r *raft.Raft, st store.MVCCStore, tm *transport.Manager, coordinator *kv.Coordinate, distServer 
*adapter.DistributionServer) *grpc.Server { s := grpc.NewServer() trx := kv.NewTransaction(r) - gs := adapter.NewGRPCServer(st, coordinator) + routedStore := kv.NewLeaderRoutedStore(st, coordinator) + gs := adapter.NewGRPCServer(routedStore, coordinator, adapter.WithCloseStore()) tm.Register(s) pb.RegisterRawKVServer(s, gs) pb.RegisterTransactionalKVServer(s, gs) diff --git a/distribution/engine.go b/distribution/engine.go index bff6abe7..323e7891 100644 --- a/distribution/engine.go +++ b/distribution/engine.go @@ -121,6 +121,37 @@ func (e *Engine) Stats() []Route { return stats } +// GetIntersectingRoutes returns all routes whose key ranges intersect with [start, end). +// A route [rStart, rEnd) intersects with [start, end) if: +// - rStart < end (or end is nil, meaning unbounded scan) +// - start < rEnd (or rEnd is nil, meaning unbounded route) +func (e *Engine) GetIntersectingRoutes(start, end []byte) []Route { + e.mu.RLock() + defer e.mu.RUnlock() + + var result []Route + for i := range e.routes { + r := &e.routes[i] + // Check if route intersects with [start, end) + // Route ends before scan starts: rEnd != nil && rEnd <= start + if r.End != nil && bytes.Compare(r.End, start) <= 0 { + continue + } + // Route starts at or after scan ends: end != nil && rStart >= end + if end != nil && bytes.Compare(r.Start, end) >= 0 { + continue + } + // Route intersects with scan range + result = append(result, Route{ + Start: cloneBytes(r.Start), + End: cloneBytes(r.End), + GroupID: r.GroupID, + Load: atomic.LoadUint64(&r.Load), + }) + } + return result +} + func (e *Engine) routeIndex(key []byte) int { if len(e.routes) == 0 { return -1 diff --git a/distribution/engine_test.go b/distribution/engine_test.go index 69ee0373..8025de3d 100644 --- a/distribution/engine_test.go +++ b/distribution/engine_test.go @@ -160,3 +160,74 @@ func assertRange(t *testing.T, r Route, start, end []byte) { t.Errorf("expected range [%q, %q), got [%q, %q]", start, end, r.Start, r.End) } } + +func TestEngineGetIntersectingRoutes(t *testing.T) { + e := NewEngine() + e.UpdateRoute([]byte("a"), []byte("m"), 1) + e.UpdateRoute([]byte("m"), []byte("z"), 2) + e.UpdateRoute([]byte("z"), nil, 3) + + cases := []struct { + name string + start []byte + end []byte + groups []uint64 + }{ + { + name: "scan in first range", + start: []byte("b"), + end: []byte("d"), + groups: []uint64{1}, + }, + { + name: "scan across first two ranges", + start: []byte("k"), + end: []byte("p"), + groups: []uint64{1, 2}, + }, + { + name: "scan across all ranges", + start: []byte("a"), + end: nil, + groups: []uint64{1, 2, 3}, + }, + { + name: "scan in last unbounded range", + start: []byte("za"), + end: nil, + groups: []uint64{3}, + }, + { + name: "scan before first range", + start: []byte("0"), + end: []byte("9"), + groups: []uint64{}, + }, + { + name: "scan at boundary", + start: []byte("m"), + end: []byte("n"), + groups: []uint64{2}, + }, + { + name: "scan ending at boundary", + start: []byte("k"), + end: []byte("m"), + groups: []uint64{1}, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + routes := e.GetIntersectingRoutes(c.start, c.end) + if len(routes) != len(c.groups) { + t.Fatalf("expected %d routes, got %d", len(c.groups), len(routes)) + } + for i, expectedGroup := range c.groups { + if routes[i].GroupID != expectedGroup { + t.Errorf("route %d: expected group %d, got %d", i, expectedGroup, routes[i].GroupID) + } + } + }) + } +} diff --git a/jepsen/Vagrantfile b/jepsen/Vagrantfile index d96d3bf5..57fa855f 100644 --- 
a/jepsen/Vagrantfile +++ b/jepsen/Vagrantfile @@ -1,3 +1,5 @@ +require "fileutils" + NODES = { ctrl: "192.168.56.10", n1: "192.168.56.11", @@ -7,6 +9,19 @@ NODES = { n5: "192.168.56.15" }.freeze +KEY_DIR = File.join(__dir__, ".ssh") +CTRL_KEY = File.join(KEY_DIR, "ctrl_id_rsa") +CTRL_PUB = "#{CTRL_KEY}.pub" + +unless File.exist?(CTRL_KEY) + FileUtils.mkdir_p(KEY_DIR) + unless system("ssh-keygen", "-t", "rsa", "-b", "2048", "-N", "", "-f", CTRL_KEY) + raise "failed to generate Jepsen SSH key at #{CTRL_KEY}" + end +end + +CTRL_PUB_KEY = File.read(CTRL_PUB).strip + Vagrant.configure("2") do |config| config.ssh.insert_key = false #config.vm.box = "debian/bookworm64" @@ -37,7 +52,7 @@ Vagrant.configure("2") do |config| node.vm.synced_folder ".", "/vagrant", disabled: true end - node.vm.provision "shell", path: "provision/base.sh", args: name.to_s + node.vm.provision "shell", path: "provision/base.sh", args: [name.to_s, CTRL_PUB_KEY] end end end diff --git a/jepsen/docker/run-in-docker.sh b/jepsen/docker/run-in-docker.sh new file mode 100644 index 00000000..65df9ad4 --- /dev/null +++ b/jepsen/docker/run-in-docker.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Copy source to writable area +mkdir -p /root/elastickv +rsync -a /jepsen-ro/ /root/elastickv/ --exclude .git --exclude jepsen/target --exclude jepsen/tmp-home + +cd /root/elastickv/jepsen + +# Install Go +if ! command -v go >/dev/null 2>&1; then + TOOLCHAIN="$(awk '$1 == "toolchain" { print $2 }' ../go.mod 2>/dev/null | head -n1 || true)" + if [ -n "${TOOLCHAIN}" ]; then + GO_VERSION="${TOOLCHAIN#go}" + else + GO_VERSION=1.26.0 + fi + ARCH="amd64" # Default to amd64; detect arm64 below + if [ "$(uname -m)" = "aarch64" ]; then ARCH="arm64"; fi + + TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz" + curl -fsSL "https://go.dev/dl/${TARBALL}" -o "${TARBALL}" + curl -fsSL "https://go.dev/dl/${TARBALL}.sha256" -o "${TARBALL}.sha256" + + expected_sha256="$(awk '{print $1}' < "${TARBALL}.sha256")" + actual_sha256="$(sha256sum "${TARBALL}" | awk '{print $1}')" + if [ "${expected_sha256}" != "${actual_sha256}" ]; then + echo "Go toolchain checksum mismatch for ${TARBALL}" >&2 + exit 1 + fi + + tar -C /usr/local -xzf "${TARBALL}" + rm -f "${TARBALL}" "${TARBALL}.sha256" + export PATH=$PATH:/usr/local/go/bin +fi + +# Install Leiningen +if ! command -v lein >/dev/null 2>&1; then + apt-get update -y + apt-get install -y --no-install-recommends leiningen +fi + +# Generate or install SSH key for control node to connect to others +if [ ! -f /root/.ssh/id_rsa ]; then + mkdir -p /root/.ssh + if [ -n "${JEPSEN_SSH_PRIVATE_KEY:-}" ]; then + printf "%s" "${JEPSEN_SSH_PRIVATE_KEY}" > /root/.ssh/id_rsa + elif [ -n "${JEPSEN_SSH_PRIVATE_KEY_PATH:-}" ] && [ -f "${JEPSEN_SSH_PRIVATE_KEY_PATH}" ]; then + cp "${JEPSEN_SSH_PRIVATE_KEY_PATH}" /root/.ssh/id_rsa + elif [ -f /jepsen-ro/jepsen/docker/id_rsa ]; then + # Backward-compatible path (local, uncommitted key file) + cp /jepsen-ro/jepsen/docker/id_rsa /root/.ssh/id_rsa + else + if ! command -v ssh-keygen >/dev/null 2>&1; then + apt-get update -y + apt-get install -y --no-install-recommends openssh-client + fi + ssh-keygen -t rsa -b 2048 -N "" -f /root/.ssh/id_rsa + fi + chmod 600 /root/.ssh/id_rsa + # Disable strict host checking + echo "Host *" > /root/.ssh/config + echo " StrictHostKeyChecking no" >> /root/.ssh/config + echo " UserKnownHostsFile /dev/null" >> /root/.ssh/config + echo " User vagrant" >> /root/.ssh/config +fi + +# Run test +# Nodes are reachable by hostname (n1, n2...) 
in docker network +export LEIN_ROOT=true +lein run -m elastickv.redis-workload \ + --nodes n1,n2,n3,n4,n5 \ + --time-limit 60 \ + --rate 10 \ + --faults partition,kill,clock \ + --concurrency 10 diff --git a/jepsen/docker/ssh_config b/jepsen/docker/ssh_config new file mode 100644 index 00000000..388f5211 --- /dev/null +++ b/jepsen/docker/ssh_config @@ -0,0 +1,44 @@ +Host n1 + HostName 127.0.0.1 + User vagrant + Port 2221 + IdentityFile ~/.ssh/id_rsa + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + LogLevel ERROR + +Host n2 + HostName 127.0.0.1 + User vagrant + Port 2222 + IdentityFile ~/.ssh/id_rsa + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + LogLevel ERROR + +Host n3 + HostName 127.0.0.1 + User vagrant + Port 2223 + IdentityFile ~/.ssh/id_rsa + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + LogLevel ERROR + +Host n4 + HostName 127.0.0.1 + User vagrant + Port 2224 + IdentityFile ~/.ssh/id_rsa + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + LogLevel ERROR + +Host n5 + HostName 127.0.0.1 + User vagrant + Port 2225 + IdentityFile ~/.ssh/id_rsa + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + LogLevel ERROR diff --git a/jepsen/provision/base.sh b/jepsen/provision/base.sh index 567d562f..f7f44313 100755 --- a/jepsen/provision/base.sh +++ b/jepsen/provision/base.sh @@ -2,6 +2,7 @@ set -euo pipefail ROLE="${1:-db}" +PUBKEY="${2:-}" echo "[jepsen] provisioning role=${ROLE}" sudo apt-get update -y @@ -44,38 +45,20 @@ if [ "$ROLE" = "ctrl" ]; then echo 'export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin' | sudo tee /etc/profile.d/go.sh >/dev/null if [ ! -f /home/vagrant/.ssh/id_rsa ]; then - cat <<'KEY' > /home/vagrant/.ssh/id_rsa ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI -w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP -kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2 -hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO -Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW -yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd -ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1 -Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf -TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK -iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A -sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf -4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP -cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk -EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN -CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX -3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG -YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj -3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+ -dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz -6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC -P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF -llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ -kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH -+vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ -NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s= ------END RSA PRIVATE KEY----- -KEY + if [ -f /home/vagrant/elastickv/jepsen/.ssh/ctrl_id_rsa ]; then + cp /home/vagrant/elastickv/jepsen/.ssh/ctrl_id_rsa 
/home/vagrant/.ssh/id_rsa + else + if ! command -v ssh-keygen >/dev/null 2>&1; then + sudo apt-get install -y --no-install-recommends openssh-client + fi + ssh-keygen -t rsa -b 2048 -N "" -f /home/vagrant/.ssh/id_rsa + fi chmod 600 /home/vagrant/.ssh/id_rsa chown vagrant:vagrant /home/vagrant/.ssh/id_rsa fi + if [ -z "${PUBKEY}" ] && [ -f /home/vagrant/.ssh/id_rsa.pub ]; then + PUBKEY="$(cat /home/vagrant/.ssh/id_rsa.pub)" + fi cat <<'EOF' > /home/vagrant/.ssh/config Host n1 n2 n3 n4 n5 User vagrant @@ -87,10 +70,12 @@ EOF chown vagrant:vagrant /home/vagrant/.ssh/config fi -# authorize the same key on all nodes -cat <<'PUB' >> /home/vagrant/.ssh/authorized_keys -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDo0XyJqWW9BWnbZYOROSyu2+n15ZbrgPGFa/pM+E4xmHu4B8yMPp4jbWRhR8w/Pr9SNmCeqF3r3LdWHktKPR2cjduPaoAoM1BbXTii7+iHnaZaqD5HJhXQhr3Y+QQOjcYVMFyQU8hMAzMF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key -PUB +touch /home/vagrant/.ssh/authorized_keys +if [ -n "${PUBKEY}" ]; then + if ! grep -Fq "${PUBKEY}" /home/vagrant/.ssh/authorized_keys; then + echo "${PUBKEY}" >> /home/vagrant/.ssh/authorized_keys + fi +fi chown vagrant:vagrant /home/vagrant/.ssh/authorized_keys chmod 600 /home/vagrant/.ssh/authorized_keys diff --git a/jepsen/src/elastickv/db.clj b/jepsen/src/elastickv/db.clj index 63917a47..6248bd3c 100644 --- a/jepsen/src/elastickv/db.clj +++ b/jepsen/src/elastickv/db.clj @@ -71,19 +71,42 @@ (get port-spec node) port-spec)) -(defn- build-raft-redis-map [nodes grpc-port redis-port] - (->> nodes - (map (fn [n] - (let [g (node-addr n (port-for grpc-port n)) - r (node-addr n (port-for redis-port n))] - (str g "=" r)))) +(defn- group-ids [raft-groups] + (->> (keys raft-groups) + (sort))) + +(defn- group-addr [node raft-groups group-id] + (node-addr node (port-for (get raft-groups group-id) node))) + +(defn- build-raft-groups-arg [node raft-groups] + (->> (group-ids raft-groups) + (map (fn [gid] + (str gid "=" (group-addr node raft-groups gid)))) (clojure.string/join ","))) +(defn- build-raft-redis-map [nodes grpc-port redis-port raft-groups] + (let [groups (when (seq raft-groups) (group-ids raft-groups))] + (->> nodes + (mapcat (fn [n] + (let [redis (node-addr n (port-for redis-port n))] + (if (seq groups) + (map (fn [gid] + (str (group-addr n raft-groups gid) "=" redis)) + groups) + [(str (node-addr n (port-for grpc-port n)) "=" redis)])))) + (clojure.string/join ",")))) + (defn- start-node! - [test node {:keys [bootstrap-node grpc-port redis-port data-dir]}] - (let [grpc (node-addr node (port-for grpc-port node)) + [test node {:keys [bootstrap-node grpc-port redis-port data-dir raft-groups shard-ranges]}] + (when (and (seq raft-groups) + (> (count raft-groups) 1) + (nil? shard-ranges)) + (throw (ex-info "shard-ranges is required when raft-groups has multiple entries" {}))) + (let [grpc (if (seq raft-groups) + (group-addr node raft-groups (first (group-ids raft-groups))) + (node-addr node (port-for grpc-port node))) redis (node-addr node (port-for redis-port node)) - raft-redis-map (build-raft-redis-map (:nodes test) grpc-port redis-port) + raft-redis-map (build-raft-redis-map (:nodes test) grpc-port redis-port raft-groups) bootstrap? 
(= node bootstrap-node) args (cond-> [server-bin "--address" grpc @@ -91,6 +114,8 @@ "--raftId" (name node) "--raftDataDir" data-dir "--raftRedisMap" raft-redis-map] + (seq raft-groups) (conj "--raftGroups" (build-raft-groups-arg node raft-groups)) + (seq shard-ranges) (conj "--shardRanges" shard-ranges) bootstrap? (conj "--raftBootstrap"))] (c/on node (c/su @@ -111,10 +136,12 @@ (defn- wait-for-grpc! "Wait until the given node listens on grpc port." [node grpc-port] - (c/on node - (c/exec :bash "-c" - (format "for i in $(seq 1 60); do if nc -z -w 1 %s %s; then exit 0; fi; sleep 1; done; echo 'Timed out waiting for %s:%s'; exit 1" - (name node) grpc-port (name node) grpc-port)))) + (let [ports (if (sequential? grpc-port) grpc-port [grpc-port])] + (doseq [p ports] + (c/on node + (c/exec :bash "-c" + "for i in $(seq 1 60); do if nc -z -w 1 $1 $2; then exit 0; fi; sleep 1; done; echo \\\"Timed out waiting for $1:$2\\\"; exit 1" + "--" (name node) (str p)))))) (defn- join-node! "Join peer into cluster via raftadmin, executed on bootstrap node." @@ -138,14 +165,26 @@ :bootstrap-node (first (:nodes test))} opts)) (when (= node (first (:nodes test))) - (let [leader (node-addr node (or (:grpc-port opts) 50051))] + (let [raft-groups (:raft-groups opts) + grpc-port (or (:grpc-port opts) 50051) + group-ids (when (seq raft-groups) (group-ids raft-groups))] (doseq [peer (rest (:nodes test))] (util/await-fn (fn [] (try - (wait-for-grpc! peer (or (:grpc-port opts) 50051)) - (join-node! node leader (name peer) - (node-addr peer (or (:grpc-port opts) 50051))) + (if (seq raft-groups) + (doseq [gid group-ids] + (wait-for-grpc! peer (port-for (get raft-groups gid) peer)) + (join-node! node + (group-addr node raft-groups gid) + (name peer) + (group-addr peer raft-groups gid))) + (do + (wait-for-grpc! peer grpc-port) + (join-node! node + (node-addr node grpc-port) + (name peer) + (node-addr peer grpc-port)))) true (catch Throwable t (warn t "retrying join for" peer) @@ -171,7 +210,10 @@ :redis-port (or (:redis-port opts) 6379) :bootstrap-node (first (:nodes test))} opts)) - (wait-for-grpc! node (or (:grpc-port opts) 50051)) + (if-let [raft-groups (:raft-groups opts)] + (wait-for-grpc! node (map (fn [gid] (port-for (get raft-groups gid) node)) + (group-ids raft-groups))) + (wait-for-grpc! node (or (:grpc-port opts) 50051))) (info "node started" node) this) (kill! [this _test node] @@ -194,6 +236,8 @@ (defn db "Constructs an ElastickvDB with optional opts. 
- opts: {:grpc-port 50051 :redis-port 6379}" + opts: {:grpc-port 50051 :redis-port 6379 + :raft-groups {1 50051 2 50052} + :shard-ranges \":m=1,m:=2\"}" ([] (->ElastickvDB {})) ([opts] (->ElastickvDB opts))) diff --git a/jepsen/src/elastickv/redis_workload.clj b/jepsen/src/elastickv/redis_workload.clj index f844c1bf..4a5730c6 100644 --- a/jepsen/src/elastickv/redis_workload.clj +++ b/jepsen/src/elastickv/redis_workload.clj @@ -3,11 +3,14 @@ (:require [clojure.string :as str] [clojure.tools.cli :as tools.cli] [elastickv.db :as ekdb] + [jepsen.db :as jdb] [jepsen [client :as client] [core :as jepsen] [generator :as gen] [net :as net]] [jepsen.control :as control] + [jepsen.os :as os] + [jepsen.nemesis :as nemesis] [jepsen.nemesis.combined :as combined] [jepsen.os.debian :as debian] [jepsen.redis.client :as rc] @@ -82,44 +85,79 @@ (let [nodes (or (:nodes opts) default-nodes) redis-ports (or (:redis-ports opts) (repeat (count nodes) (or (:redis-port opts) 6379))) node->port (or (:node->port opts) (ports->node-map redis-ports nodes)) - db (ekdb/db {:grpc-port (or (:grpc-port opts) 50051) - :redis-port node->port}) + local? (:local opts) + db (if local? + jdb/noop + (ekdb/db {:grpc-port (or (:grpc-port opts) 50051) + :redis-port node->port + :raft-groups (:raft-groups opts) + :shard-ranges (:shard-ranges opts)})) rate (double (or (:rate opts) 5)) time-limit (or (:time-limit opts) 30) - faults (normalize-faults (or (:faults opts) [:partition :kill])) - nemesis-p (combined/nemesis-package {:db db - :faults faults - :interval (or (:fault-interval opts) 40)}) + faults (if local? + [] + (normalize-faults (or (:faults opts) [:partition :kill]))) + nemesis-p (when-not local? + (combined/nemesis-package {:db db + :faults faults + :interval (or (:fault-interval opts) 40)})) + nemesis-gen (if nemesis-p + (:generator nemesis-p) + (gen/once {:type :info :f :noop})) workload (elastickv-append-workload (assoc opts :node->port node->port))] (merge workload {:name (or (:name opts) "elastickv-redis-append") :nodes nodes :db db - :os debian/os - :net net/iptables + :redis-host (:redis-host opts) + :os (if local? os/noop debian/os) + :net (if local? net/noop net/iptables) :ssh (merge {:username "vagrant" :private-key-path "/home/vagrant/.ssh/id_rsa" :strict-host-key-checking false} + (when local? {:dummy true}) (:ssh opts)) :remote control/ssh - :nemesis (:nemesis nemesis-p) + :nemesis (if nemesis-p + (:nemesis nemesis-p) + nemesis/noop) ; Jepsen 0.3.x can't fressian-serialize some combined final gens; skip. :final-generator nil :concurrency (or (:concurrency opts) 5) :generator (->> (:generator workload) - (gen/nemesis (:generator nemesis-p)) + (gen/nemesis nemesis-gen) (gen/stagger (/ rate)) (gen/time-limit time-limit))})))) (def cli-opts [[nil "--nodes NODES" "Comma separated node names." :default "n1,n2,n3,n4,n5"] + [nil "--local" "Run locally without SSH or nemesis." + :default false] + [nil "--host HOST" "Redis host override for clients." + :default nil] + [nil "--ports PORTS" "Comma separated Redis ports (per node)." + :default nil + :parse-fn (fn [s] + (->> (str/split s #",") + (remove str/blank?) + (mapv #(Integer/parseInt %))))] [nil "--redis-port PORT" "Redis port (applied to all nodes)." :default 6379 :parse-fn #(Integer/parseInt %)] [nil "--grpc-port PORT" "gRPC/Raft port." :default 50051 :parse-fn #(Integer/parseInt %)] + [nil "--raft-groups GROUPS" "Comma separated raft groups (groupID=port,...)" + :parse-fn (fn [s] + (->> (str/split s #",") + (remove str/blank?) 
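+ ;; e.g. "1=50051,2=50052" parses to {1 50051, 2 50052}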
+ (map (fn [part] + (let [[gid port] (str/split part #"=" 2)] + [(Long/parseLong gid) (Integer/parseInt port)]))) + (into {})))] + [nil "--shard-ranges RANGES" "Shard ranges (start:end=groupID,...)" + :default nil] [nil "--faults LIST" "Comma separated faults (partition,kill,clock)." :default "partition,kill,clock"] [nil "--ssh-key PATH" "SSH private key path." @@ -140,7 +178,13 @@ (defn -main [& args] (let [{:keys [options errors summary]} (tools.cli/parse-opts args cli-opts) - node-list (-> (:nodes options) + default-nodes "n1,n2,n3,n4,n5" + ports (:ports options) + local? (or (:local options) (and (:host options) (seq ports))) + nodes-raw (if (and ports (= (:nodes options) default-nodes)) + (str/join "," (map (fn [i] (str "n" i)) (range 1 (inc (count ports))))) + (:nodes options)) + node-list (-> nodes-raw (str/split #",") (->> (remove str/blank?) vec)) @@ -152,8 +196,13 @@ options (assoc options :nodes node-list :faults faults + :local local? + :redis-host (:host options) + :redis-ports ports :redis-port (:redis-port options) :grpc-port (:grpc-port options) + :raft-groups (:raft-groups options) + :shard-ranges (:shard-ranges options) :ssh {:username (:ssh-user options) :private-key-path (:ssh-key options) :strict-host-key-checking false})] @@ -161,4 +210,6 @@ (:help options) (println summary) (seq errors) (binding [*out* *err*] (println "Error parsing options:" (str/join "; " errors))) + (:local options) (binding [control/*dummy* true] + (jepsen/run! (elastickv-redis-test options))) :else (jepsen/run! (elastickv-redis-test options))))) diff --git a/kv/coordinator.go b/kv/coordinator.go index 8340105c..9bf5b1e7 100644 --- a/kv/coordinator.go +++ b/kv/coordinator.go @@ -6,8 +6,6 @@ import ( pb "github.com/bootjp/elastickv/proto" "github.com/cockroachdb/errors" "github.com/hashicorp/raft" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" ) func NewCoordinator(txm Transactional, r *raft.Raft) *Coordinate { @@ -26,21 +24,34 @@ type Coordinate struct { transactionManager Transactional raft *raft.Raft clock *HLC + connCache GRPCConnCache } var _ Coordinator = (*Coordinate)(nil) type Coordinator interface { - Dispatch(reqs *OperationGroup[OP]) (*CoordinateResponse, error) + Dispatch(ctx context.Context, reqs *OperationGroup[OP]) (*CoordinateResponse, error) IsLeader() bool VerifyLeader() error RaftLeader() raft.ServerAddress + IsLeaderForKey(key []byte) bool + VerifyLeaderForKey(key []byte) error + RaftLeaderForKey(key []byte) raft.ServerAddress Clock() *HLC } -func (c *Coordinate) Dispatch(reqs *OperationGroup[OP]) (*CoordinateResponse, error) { +func (c *Coordinate) Dispatch(ctx context.Context, reqs *OperationGroup[OP]) (*CoordinateResponse, error) { + if ctx == nil { + ctx = context.Background() + } + + // Validate the request before any use to avoid panics on malformed input. 
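+ // validateOperationGroup itself is not shown in this diff; presumably it
+ // rejects nil groups, empty Elems, and ops other than Put/Del, keeping the
+ // panic("unreachable") branches in toRawRequest/elemToMutation unreachable.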
+ if err := validateOperationGroup(reqs); err != nil { + return nil, err + } + if !c.IsLeader() { - return c.redirect(reqs) + return c.redirect(ctx, reqs) } if reqs.IsTxn && reqs.StartTS == 0 { @@ -73,16 +84,24 @@ func (c *Coordinate) Clock() *HLC { return c.clock } +func (c *Coordinate) IsLeaderForKey(_ []byte) bool { + return c.IsLeader() +} + +func (c *Coordinate) VerifyLeaderForKey(_ []byte) error { + return c.VerifyLeader() +} + +func (c *Coordinate) RaftLeaderForKey(_ []byte) raft.ServerAddress { + return c.RaftLeader() +} + func (c *Coordinate) nextStartTS() uint64 { return c.clock.Next() } func (c *Coordinate) dispatchTxn(reqs []*Elem[OP], startTS uint64) (*CoordinateResponse, error) { - var logs []*pb.Request - for _, req := range reqs { - m := c.toTxnRequests(req, startTS) - logs = append(logs, m...) - } + logs := txnRequests(startTS, reqs) r, err := c.transactionManager.Commit(logs) if err != nil { @@ -144,89 +163,29 @@ func (c *Coordinate) toRawRequest(req *Elem[OP]) *pb.Request { panic("unreachable") } -func (c *Coordinate) toTxnRequests(req *Elem[OP], startTS uint64) []*pb.Request { - switch req.Op { - case Put: - return []*pb.Request{ - { - IsTxn: true, - Phase: pb.Phase_PREPARE, - Ts: startTS, - Mutations: []*pb.Mutation{ - { - Key: req.Key, - Value: req.Value, - }, - }, - }, - { - IsTxn: true, - Phase: pb.Phase_COMMIT, - Ts: startTS, - Mutations: []*pb.Mutation{ - { - Key: req.Key, - Value: req.Value, - }, - }, - }, - } - - case Del: - return []*pb.Request{ - { - IsTxn: true, - Phase: pb.Phase_PREPARE, - Ts: startTS, - Mutations: []*pb.Mutation{ - { - Key: req.Key, - }, - }, - }, - { - IsTxn: true, - Phase: pb.Phase_COMMIT, - Ts: startTS, - Mutations: []*pb.Mutation{ - { - Key: req.Key, - }, - }, - }, - } - } - - panic("unreachable") -} - var ErrInvalidRequest = errors.New("invalid request") +var ErrLeaderNotFound = errors.New("leader not found") -func (c *Coordinate) redirect(reqs *OperationGroup[OP]) (*CoordinateResponse, error) { - ctx := context.Background() - +func (c *Coordinate) redirect(ctx context.Context, reqs *OperationGroup[OP]) (*CoordinateResponse, error) { if len(reqs.Elems) == 0 { return nil, ErrInvalidRequest } addr, _ := c.raft.LeaderWithID() + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } - conn, err := grpc.NewClient(string(addr), - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - ) + conn, err := c.connCache.ConnFor(addr) if err != nil { - return nil, errors.WithStack(err) + return nil, err } - defer conn.Close() cli := pb.NewInternalClient(conn) var requests []*pb.Request if reqs.IsTxn { - for _, req := range reqs.Elems { - requests = append(requests, c.toTxnRequests(req, reqs.StartTS)...) 
- } + requests = txnRequests(reqs.StartTS, reqs.Elems) } else { for _, req := range reqs.Elems { requests = append(requests, c.toRawRequest(req)) @@ -260,3 +219,35 @@ func (c *Coordinate) toForwardRequest(reqs []*pb.Request) *pb.ForwardRequest { return out } + +func elemToMutation(req *Elem[OP]) *pb.Mutation { + switch req.Op { + case Put: + return &pb.Mutation{ + Op: pb.Op_PUT, + Key: req.Key, + Value: req.Value, + } + case Del: + return &pb.Mutation{ + Op: pb.Op_DEL, + Key: req.Key, + } + } + panic("unreachable") +} + +func txnRequests(startTS uint64, reqs []*Elem[OP]) []*pb.Request { + muts := make([]*pb.Mutation, 0, len(reqs)) + for _, req := range reqs { + muts = append(muts, elemToMutation(req)) + } + + // Use separate slices for PREPARE and COMMIT to avoid sharing slice header/state. + prepareMuts := append([]*pb.Mutation(nil), muts...) + commitMuts := append([]*pb.Mutation(nil), muts...) + return []*pb.Request{ + {IsTxn: true, Phase: pb.Phase_PREPARE, Ts: startTS, Mutations: prepareMuts}, + {IsTxn: true, Phase: pb.Phase_COMMIT, Ts: startTS, Mutations: commitMuts}, + } +} diff --git a/kv/grpc_conn_cache.go b/kv/grpc_conn_cache.go new file mode 100644 index 00000000..fbca3d07 --- /dev/null +++ b/kv/grpc_conn_cache.go @@ -0,0 +1,109 @@ +package kv + +import ( + "sync" + + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" +) + +// GRPCConnCache reuses gRPC connections per address. gRPC itself handles +// reconnection on transient failures; we only force a re-dial if the conn has +// already been closed (Shutdown). +type GRPCConnCache struct { + mu sync.Mutex + conns map[raft.ServerAddress]*grpc.ClientConn +} + +func (c *GRPCConnCache) cachedConn(addr raft.ServerAddress) *grpc.ClientConn { + c.mu.Lock() + defer c.mu.Unlock() + + if c.conns == nil { + c.conns = make(map[raft.ServerAddress]*grpc.ClientConn) + } + + conn, ok := c.conns[addr] + if !ok || conn == nil { + return nil + } + + st := conn.GetState() + if st == connectivity.Shutdown { + delete(c.conns, addr) + return nil + } + if st == connectivity.TransientFailure { + conn.ResetConnectBackoff() + } + return conn +} + +func (c *GRPCConnCache) storeConn(addr raft.ServerAddress, conn *grpc.ClientConn) *grpc.ClientConn { + c.mu.Lock() + defer c.mu.Unlock() + + if c.conns == nil { + c.conns = make(map[raft.ServerAddress]*grpc.ClientConn) + } + + existing, ok := c.conns[addr] + if ok && existing != nil { + st := existing.GetState() + if st != connectivity.Shutdown { + if st == connectivity.TransientFailure { + existing.ResetConnectBackoff() + } + return existing + } + delete(c.conns, addr) + } + + c.conns[addr] = conn + return conn +} + +func (c *GRPCConnCache) ConnFor(addr raft.ServerAddress) (*grpc.ClientConn, error) { + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } + + if conn := c.cachedConn(addr); conn != nil { + return conn, nil + } + + conn, err := grpc.NewClient(string(addr), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + ) + if err != nil { + return nil, errors.WithStack(err) + } + + stored := c.storeConn(addr, conn) + if stored != conn { + _ = conn.Close() + } + return stored, nil +} + +func (c *GRPCConnCache) Close() error { + c.mu.Lock() + conns := c.conns + c.conns = nil + c.mu.Unlock() + + var first error + for _, conn := range conns { + if conn == nil { + continue + } + if err := conn.Close(); err != 
nil && first == nil { + first = errors.WithStack(err) + } + } + return first +} diff --git a/kv/grpc_conn_cache_test.go b/kv/grpc_conn_cache_test.go new file mode 100644 index 00000000..715cd575 --- /dev/null +++ b/kv/grpc_conn_cache_test.go @@ -0,0 +1,103 @@ +package kv + +import ( + "context" + "net" + "sync" + "testing" + + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestGRPCConnCache_ConnForEmptyAddr(t *testing.T) { + t.Parallel() + + var c GRPCConnCache + conn, err := c.ConnFor("") + require.Nil(t, conn) + require.Error(t, err) + require.True(t, errors.Is(err, ErrLeaderNotFound)) +} + +func TestGRPCConnCache_ReusesConnection(t *testing.T) { + t.Parallel() + + var lc net.ListenConfig + lis, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv := grpc.NewServer() + go func() { + _ = srv.Serve(lis) + }() + t.Cleanup(func() { + srv.Stop() + _ = lis.Close() + }) + + addr := raft.ServerAddress(lis.Addr().String()) + + var c GRPCConnCache + conn1, err := c.ConnFor(addr) + require.NoError(t, err) + conn2, err := c.ConnFor(addr) + require.NoError(t, err) + require.Same(t, conn1, conn2) + + require.NoError(t, c.Close()) + + conn3, err := c.ConnFor(addr) + require.NoError(t, err) + require.NotSame(t, conn1, conn3) + + require.NoError(t, c.Close()) +} + +func TestGRPCConnCache_ConcurrentConnFor(t *testing.T) { + t.Parallel() + + var lc net.ListenConfig + lis, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0") + require.NoError(t, err) + + srv := grpc.NewServer() + go func() { + _ = srv.Serve(lis) + }() + t.Cleanup(func() { + srv.Stop() + _ = lis.Close() + }) + + addr := raft.ServerAddress(lis.Addr().String()) + + var c GRPCConnCache + + const n = 32 + conns := make([]*grpc.ClientConn, n) + errs := make([]error, n) + + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + conn, err := c.ConnFor(addr) + conns[i] = conn + errs[i] = err + }(i) + } + wg.Wait() + + for i := 0; i < n; i++ { + require.NoError(t, errs[i]) + } + for i := 1; i < n; i++ { + require.Same(t, conns[0], conns[i]) + } + + require.NoError(t, c.Close()) +} diff --git a/kv/latest_commit_ts.go b/kv/latest_commit_ts.go new file mode 100644 index 00000000..3de65e2b --- /dev/null +++ b/kv/latest_commit_ts.go @@ -0,0 +1,106 @@ +package kv + +import ( + "context" + "sync" + + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "golang.org/x/sync/errgroup" +) + +const maxLatestCommitTSConcurrency = 16 + +// MaxLatestCommitTS returns the maximum commit timestamp for the provided keys. +// +// Missing keys are ignored. If any LatestCommitTS lookup returns an error, the +// error is returned to the caller. +func MaxLatestCommitTS(ctx context.Context, st store.MVCCStore, keys [][]byte) (uint64, error) { + if st == nil || len(keys) == 0 { + return 0, nil + } + + uniq := uniqueKeys(keys) + if len(uniq) == 0 { + return 0, nil + } + + // Avoid goroutine overhead for tiny inputs. 
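+ // A lone key needs no errgroup or mutex; resolve it via the sequential path.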
+ if len(uniq) == 1 { + return maxLatestCommitTSSequential(ctx, st, uniq) + } + + limit := maxLatestCommitTSConcurrency + if limit < 1 { + limit = 1 + } + if limit > len(uniq) { + limit = len(uniq) + } + + return maxLatestCommitTSParallel(ctx, st, uniq, limit) +} + +func uniqueKeys(keys [][]byte) [][]byte { + seen := make(map[string]struct{}, len(keys)) + uniq := make([][]byte, 0, len(keys)) + for _, key := range keys { + if len(key) == 0 { + continue + } + k := string(key) + if _, ok := seen[k]; ok { + continue + } + seen[k] = struct{}{} + uniq = append(uniq, key) + } + return uniq +} + +func maxLatestCommitTSSequential(ctx context.Context, st store.MVCCStore, keys [][]byte) (uint64, error) { + var maxTS uint64 + for _, key := range keys { + ts, exists, err := st.LatestCommitTS(ctx, key) + if err != nil { + return 0, errors.WithStack(err) + } + if !exists { + continue + } + if ts > maxTS { + maxTS = ts + } + } + return maxTS, nil +} + +func maxLatestCommitTSParallel(ctx context.Context, st store.MVCCStore, keys [][]byte, limit int) (uint64, error) { + eg, egctx := errgroup.WithContext(ctx) + eg.SetLimit(limit) + + var mu sync.Mutex + var maxTS uint64 + for i := range keys { + key := keys[i] + eg.Go(func() error { + ts, exists, err := st.LatestCommitTS(egctx, key) + if err != nil { + return errors.WithStack(err) + } + if !exists { + return nil + } + mu.Lock() + if ts > maxTS { + maxTS = ts + } + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + return 0, err + } + return maxTS, nil +} diff --git a/kv/latest_commit_ts_test.go b/kv/latest_commit_ts_test.go new file mode 100644 index 00000000..65933139 --- /dev/null +++ b/kv/latest_commit_ts_test.go @@ -0,0 +1,91 @@ +package kv + +import ( + "bytes" + "context" + "testing" + + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" +) + +var ErrTestLatestCommitTS = errors.New("test latest commit ts") + +type erroringLatestCommitStore struct { + store.MVCCStore + key []byte +} + +func (s erroringLatestCommitStore) LatestCommitTS(ctx context.Context, key []byte) (uint64, bool, error) { + if bytes.Equal(key, s.key) { + return 0, false, ErrTestLatestCommitTS + } + return s.MVCCStore.LatestCommitTS(ctx, key) +} + +func TestMaxLatestCommitTS_Empty(t *testing.T) { + t.Parallel() + + ctx := context.Background() + ts, err := MaxLatestCommitTS(ctx, nil, nil) + require.NoError(t, err) + require.Equal(t, uint64(0), ts) + + st := store.NewMVCCStore() + ts, err = MaxLatestCommitTS(ctx, st, nil) + require.NoError(t, err) + require.Equal(t, uint64(0), ts) + + ts, err = MaxLatestCommitTS(ctx, st, [][]byte{nil, {}}) + require.NoError(t, err) + require.Equal(t, uint64(0), ts) +} + +func TestMaxLatestCommitTS_SingleKey(t *testing.T) { + t.Parallel() + + ctx := context.Background() + st := store.NewMVCCStore() + require.NoError(t, st.PutAt(ctx, []byte("k1"), []byte("v1"), 10, 0)) + + ts, err := MaxLatestCommitTS(ctx, st, [][]byte{[]byte("k1")}) + require.NoError(t, err) + require.Equal(t, uint64(10), ts) +} + +func TestMaxLatestCommitTS_DeduplicatesKeys(t *testing.T) { + t.Parallel() + + ctx := context.Background() + st := store.NewMVCCStore() + require.NoError(t, st.PutAt(ctx, []byte("a"), []byte("v1"), 10, 0)) + require.NoError(t, st.PutAt(ctx, []byte("b"), []byte("v2"), 20, 0)) + + keys := [][]byte{ + []byte("a"), + []byte("b"), + []byte("a"), + []byte("b"), + } + ts, err := MaxLatestCommitTS(ctx, st, keys) + require.NoError(t, err) + require.Equal(t, uint64(20), ts) +} + +func 
TestMaxLatestCommitTS_ReturnsError(t *testing.T) { + t.Parallel() + + ctx := context.Background() + st := store.NewMVCCStore() + require.NoError(t, st.PutAt(ctx, []byte("a"), []byte("v1"), 10, 0)) + + wrapped := erroringLatestCommitStore{ + MVCCStore: st, + key: []byte("b"), + } + ts, err := MaxLatestCommitTS(ctx, wrapped, [][]byte{[]byte("a"), []byte("b")}) + require.Error(t, err) + require.True(t, errors.Is(err, ErrTestLatestCommitTS)) + require.Equal(t, uint64(0), ts) +} diff --git a/kv/leader_proxy.go b/kv/leader_proxy.go new file mode 100644 index 00000000..f6839014 --- /dev/null +++ b/kv/leader_proxy.go @@ -0,0 +1,93 @@ +package kv + +import ( + "context" + "io" + "time" + + pb "github.com/bootjp/elastickv/proto" + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" +) + +const leaderForwardTimeout = 5 * time.Second + +// LeaderProxy forwards transactional requests to the current raft leader when +// the local node is not the leader. +type LeaderProxy struct { + raft *raft.Raft + tm *TransactionManager + + connCache GRPCConnCache +} + +// NewLeaderProxy creates a leader-aware transactional proxy for a raft group. +func NewLeaderProxy(r *raft.Raft) *LeaderProxy { + return &LeaderProxy{ + raft: r, + tm: NewTransaction(r), + } +} + +func (p *LeaderProxy) Commit(reqs []*pb.Request) (*TransactionResponse, error) { + if p.raft.State() != raft.Leader { + return p.forward(reqs) + } + // Verify leadership with a quorum to avoid accepting writes on a stale leader. + if err := p.raft.VerifyLeader().Error(); err != nil { + return p.forward(reqs) + } + return p.tm.Commit(reqs) +} + +func (p *LeaderProxy) Abort(reqs []*pb.Request) (*TransactionResponse, error) { + if p.raft.State() != raft.Leader { + return p.forward(reqs) + } + // Verify leadership with a quorum to avoid accepting aborts on a stale leader. 
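+ // VerifyLeader round-trips to a quorum, so a partitioned ex-leader fails here and falls through to forward().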
+ if err := p.raft.VerifyLeader().Error(); err != nil { + return p.forward(reqs) + } + return p.tm.Abort(reqs) +} + +func (p *LeaderProxy) forward(reqs []*pb.Request) (*TransactionResponse, error) { + if len(reqs) == 0 { + return &TransactionResponse{}, nil + } + addr, _ := p.raft.LeaderWithID() + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := p.connCache.ConnFor(addr) + if err != nil { + return nil, err + } + + cli := pb.NewInternalClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), leaderForwardTimeout) + defer cancel() + + resp, err := cli.Forward(ctx, &pb.ForwardRequest{ + IsTxn: reqs[0].IsTxn, + Requests: reqs, + }) + if err != nil { + return nil, errors.WithStack(err) + } + if !resp.Success { + return nil, ErrInvalidRequest + } + return &TransactionResponse{CommitIndex: resp.CommitIndex}, nil +} + +var _ Transactional = (*LeaderProxy)(nil) +var _ io.Closer = (*LeaderProxy)(nil) + +func (p *LeaderProxy) Close() error { + if p == nil { + return nil + } + return p.connCache.Close() +} diff --git a/kv/leader_proxy_test.go b/kv/leader_proxy_test.go new file mode 100644 index 00000000..6d428f8d --- /dev/null +++ b/kv/leader_proxy_test.go @@ -0,0 +1,158 @@ +package kv + +import ( + "context" + "net" + "sync" + "testing" + "time" + + pb "github.com/bootjp/elastickv/proto" + "github.com/bootjp/elastickv/store" + "github.com/hashicorp/raft" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +type fakeInternal struct { + pb.UnimplementedInternalServer + + mu sync.Mutex + calls int + lastReq *pb.ForwardRequest + resp *pb.ForwardResponse +} + +func (f *fakeInternal) Forward(_ context.Context, req *pb.ForwardRequest) (*pb.ForwardResponse, error) { + f.mu.Lock() + defer f.mu.Unlock() + f.calls++ + f.lastReq = req + if f.resp != nil { + return f.resp, nil + } + return &pb.ForwardResponse{Success: true, CommitIndex: 0}, nil +} + +func TestLeaderProxy_CommitLocalWhenLeader(t *testing.T) { + t.Parallel() + + st := store.NewMVCCStore() + r, stop := newSingleRaft(t, "lp-local", NewKvFSM(st)) + defer stop() + + p := NewLeaderProxy(r) + + reqs := []*pb.Request{ + { + IsTxn: false, + Phase: pb.Phase_NONE, + Ts: 10, + Mutations: []*pb.Mutation{ + {Op: pb.Op_PUT, Key: []byte("k"), Value: []byte("v")}, + }, + }, + } + resp, err := p.Commit(reqs) + require.NoError(t, err) + require.NotNil(t, resp) + require.Greater(t, resp.CommitIndex, uint64(0)) + + got, err := st.GetAt(context.Background(), []byte("k"), ^uint64(0)) + require.NoError(t, err) + require.Equal(t, []byte("v"), got) +} + +func TestLeaderProxy_ForwardsWhenFollower(t *testing.T) { + t.Parallel() + + var lc net.ListenConfig + lis, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0") + require.NoError(t, err) + + svc := &fakeInternal{resp: &pb.ForwardResponse{Success: true, CommitIndex: 123}} + srv := grpc.NewServer() + pb.RegisterInternalServer(srv, svc) + go func() { _ = srv.Serve(lis) }() + t.Cleanup(func() { + srv.Stop() + _ = lis.Close() + }) + + leaderAddr, leaderTrans := raft.NewInmemTransport(raft.ServerAddress(lis.Addr().String())) + followerAddr, followerTrans := raft.NewInmemTransport("follower") + leaderTrans.Connect(followerAddr, followerTrans) + followerTrans.Connect(leaderAddr, leaderTrans) + + raftCfg := raft.Configuration{ + Servers: []raft.Server{ + {Suffrage: raft.Voter, ID: "leader", Address: leaderAddr}, + {Suffrage: raft.Voter, ID: "follower", Address: followerAddr}, + }, + } + + leader := func() *raft.Raft { + c := raft.DefaultConfig() + 
c.LocalID = "leader" + c.HeartbeatTimeout = 50 * time.Millisecond + c.ElectionTimeout = 100 * time.Millisecond + c.LeaderLeaseTimeout = 50 * time.Millisecond + + ldb := raft.NewInmemStore() + sdb := raft.NewInmemStore() + fss := raft.NewInmemSnapshotStore() + r, err := raft.NewRaft(c, NewKvFSM(store.NewMVCCStore()), ldb, sdb, fss, leaderTrans) + require.NoError(t, err) + require.NoError(t, r.BootstrapCluster(raftCfg).Error()) + t.Cleanup(func() { _ = r.Shutdown().Error() }) + return r + }() + + follower := func() *raft.Raft { + c := raft.DefaultConfig() + c.LocalID = "follower" + c.HeartbeatTimeout = 250 * time.Millisecond + c.ElectionTimeout = 500 * time.Millisecond + c.LeaderLeaseTimeout = 250 * time.Millisecond + + ldb := raft.NewInmemStore() + sdb := raft.NewInmemStore() + fss := raft.NewInmemSnapshotStore() + r, err := raft.NewRaft(c, NewKvFSM(store.NewMVCCStore()), ldb, sdb, fss, followerTrans) + require.NoError(t, err) + require.NoError(t, r.BootstrapCluster(raftCfg).Error()) + t.Cleanup(func() { _ = r.Shutdown().Error() }) + return r + }() + + require.Eventually(t, func() bool { return leader.State() == raft.Leader }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return follower.State() == raft.Follower }, 5*time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { + addr, _ := follower.LeaderWithID() + return addr == leaderAddr + }, 5*time.Second, 10*time.Millisecond) + + p := NewLeaderProxy(follower) + t.Cleanup(func() { _ = p.connCache.Close() }) + + reqs := []*pb.Request{ + { + IsTxn: false, + Phase: pb.Phase_NONE, + Ts: 10, + Mutations: []*pb.Mutation{ + {Op: pb.Op_PUT, Key: []byte("k"), Value: []byte("v")}, + }, + }, + } + + resp, err := p.Commit(reqs) + require.NoError(t, err) + require.Equal(t, uint64(123), resp.CommitIndex) + + svc.mu.Lock() + defer svc.mu.Unlock() + require.Equal(t, 1, svc.calls) + require.NotNil(t, svc.lastReq) + require.Len(t, svc.lastReq.Requests, 1) +} diff --git a/kv/leader_routed_store.go b/kv/leader_routed_store.go new file mode 100644 index 00000000..43fe1a45 --- /dev/null +++ b/kv/leader_routed_store.go @@ -0,0 +1,249 @@ +package kv + +import ( + "bytes" + "context" + "io" + + pb "github.com/bootjp/elastickv/proto" + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" +) + +// LeaderRoutedStore is an MVCCStore wrapper that serves reads from the local +// store only when leadership is verified; otherwise it proxies reads to the +// current leader via gRPC. +// +// This is intended for single-raft-group deployments where the underlying +// store itself is not leader-aware (e.g. *store.MVCCStore). +// +// Writes and maintenance operations are delegated to the local store. 
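+// +// Illustrative usage (localStore and coordinator are assumed to be built elsewhere): +// +//	routed := NewLeaderRoutedStore(localStore, coordinator) +//	defer routed.Close() // closes only wrapper-owned gRPC conns +//	val, err := routed.GetAt(ctx, key, readTS)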
+type LeaderRoutedStore struct { + local store.MVCCStore + coordinator Coordinator + + connCache GRPCConnCache +} + +func NewLeaderRoutedStore(local store.MVCCStore, coordinator Coordinator) *LeaderRoutedStore { + return &LeaderRoutedStore{ + local: local, + coordinator: coordinator, + } +} + +func (s *LeaderRoutedStore) leaderOKForKey(key []byte) bool { + if s.coordinator == nil { + return true + } + if !s.coordinator.IsLeaderForKey(key) { + return false + } + return s.coordinator.VerifyLeaderForKey(key) == nil +} + +func (s *LeaderRoutedStore) leaderAddrForKey(key []byte) raft.ServerAddress { + if s.coordinator == nil { + return "" + } + return s.coordinator.RaftLeaderForKey(key) +} + +func (s *LeaderRoutedStore) proxyRawGet(ctx context.Context, key []byte, ts uint64) ([]byte, error) { + addr := s.leaderAddrForKey(key) + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := s.connCache.ConnFor(addr) + if err != nil { + return nil, err + } + + cli := pb.NewRawKVClient(conn) + resp, err := cli.RawGet(ctx, &pb.RawGetRequest{Key: key, Ts: ts}) + if err != nil { + return nil, errors.WithStack(err) + } + if resp.Value == nil { + return nil, store.ErrKeyNotFound + } + return resp.Value, nil +} + +func (s *LeaderRoutedStore) proxyRawLatestCommitTS(ctx context.Context, key []byte) (uint64, bool, error) { + addr := s.leaderAddrForKey(key) + if addr == "" { + return 0, false, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := s.connCache.ConnFor(addr) + if err != nil { + return 0, false, err + } + + cli := pb.NewRawKVClient(conn) + resp, err := cli.RawLatestCommitTS(ctx, &pb.RawLatestCommitTSRequest{Key: key}) + if err != nil { + return 0, false, errors.WithStack(err) + } + return resp.Ts, resp.Exists, nil +} + +func (s *LeaderRoutedStore) proxyRawScanAt(ctx context.Context, start []byte, end []byte, limit int, ts uint64) ([]*store.KVPair, error) { + addr := s.leaderAddrForKey(start) + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := s.connCache.ConnFor(addr) + if err != nil { + return nil, err + } + + cli := pb.NewRawKVClient(conn) + resp, err := cli.RawScanAt(ctx, &pb.RawScanAtRequest{ + StartKey: start, + EndKey: end, + Limit: int64(limit), + Ts: ts, + }) + if err != nil { + return nil, errors.WithStack(err) + } + + out := make([]*store.KVPair, 0, len(resp.Kv)) + for _, kvp := range resp.Kv { + out = append(out, &store.KVPair{ + Key: bytes.Clone(kvp.Key), + Value: bytes.Clone(kvp.Value), + }) + } + return out, nil +} + +func (s *LeaderRoutedStore) GetAt(ctx context.Context, key []byte, ts uint64) ([]byte, error) { + if s == nil || s.local == nil { + return nil, store.ErrKeyNotFound + } + if s.leaderOKForKey(key) { + val, err := s.local.GetAt(ctx, key, ts) + return val, errors.WithStack(err) + } + return s.proxyRawGet(ctx, key, ts) +} + +func (s *LeaderRoutedStore) ExistsAt(ctx context.Context, key []byte, ts uint64) (bool, error) { + v, err := s.GetAt(ctx, key, ts) + if err != nil { + if errors.Is(err, store.ErrKeyNotFound) { + return false, nil + } + return false, err + } + return v != nil, nil +} + +func (s *LeaderRoutedStore) ScanAt(ctx context.Context, start []byte, end []byte, limit int, ts uint64) ([]*store.KVPair, error) { + if s == nil || s.local == nil { + return []*store.KVPair{}, nil + } + if limit <= 0 { + return []*store.KVPair{}, nil + } + if s.leaderOKForKey(start) { + kvs, err := s.local.ScanAt(ctx, start, end, limit, ts) + return kvs, errors.WithStack(err) + } + return s.proxyRawScanAt(ctx, start, end, 
limit, ts) +} + +func (s *LeaderRoutedStore) PutAt(ctx context.Context, key []byte, value []byte, commitTS uint64, expireAt uint64) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.PutAt(ctx, key, value, commitTS, expireAt)) +} + +func (s *LeaderRoutedStore) DeleteAt(ctx context.Context, key []byte, commitTS uint64) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.DeleteAt(ctx, key, commitTS)) +} + +func (s *LeaderRoutedStore) PutWithTTLAt(ctx context.Context, key []byte, value []byte, commitTS uint64, expireAt uint64) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.PutWithTTLAt(ctx, key, value, commitTS, expireAt)) +} + +func (s *LeaderRoutedStore) ExpireAt(ctx context.Context, key []byte, expireAt uint64, commitTS uint64) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.ExpireAt(ctx, key, expireAt, commitTS)) +} + +func (s *LeaderRoutedStore) LatestCommitTS(ctx context.Context, key []byte) (uint64, bool, error) { + if s == nil || s.local == nil { + return 0, false, nil + } + if s.leaderOKForKey(key) { + ts, exists, err := s.local.LatestCommitTS(ctx, key) + return ts, exists, errors.WithStack(err) + } + return s.proxyRawLatestCommitTS(ctx, key) +} + +func (s *LeaderRoutedStore) ApplyMutations(ctx context.Context, mutations []*store.KVPairMutation, startTS, commitTS uint64) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.ApplyMutations(ctx, mutations, startTS, commitTS)) +} + +func (s *LeaderRoutedStore) LastCommitTS() uint64 { + if s == nil || s.local == nil { + return 0 + } + return s.local.LastCommitTS() +} + +func (s *LeaderRoutedStore) Compact(ctx context.Context, minTS uint64) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.Compact(ctx, minTS)) +} + +func (s *LeaderRoutedStore) Snapshot() (io.ReadWriter, error) { + if s == nil || s.local == nil { + return nil, errors.WithStack(store.ErrNotSupported) + } + snap, err := s.local.Snapshot() + if err != nil { + return nil, errors.WithStack(err) + } + return snap, nil +} + +func (s *LeaderRoutedStore) Restore(buf io.Reader) error { + if s == nil || s.local == nil { + return errors.WithStack(store.ErrNotSupported) + } + return errors.WithStack(s.local.Restore(buf)) +} + +func (s *LeaderRoutedStore) Close() error { + if s == nil { + return nil + } + // LeaderRoutedStore is a routing wrapper; it does not own the underlying + // store's lifecycle. Close only releases resources owned by the wrapper. + return s.connCache.Close() +} + +var _ store.MVCCStore = (*LeaderRoutedStore)(nil) diff --git a/kv/shard_key.go b/kv/shard_key.go new file mode 100644 index 00000000..f1a06444 --- /dev/null +++ b/kv/shard_key.go @@ -0,0 +1,15 @@ +package kv + +import "github.com/bootjp/elastickv/store" + +// routeKey normalizes internal keys (e.g., list metadata/items) to the logical +// user key used for shard routing. 
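+// Without this normalization, a list's metadata and item keys could route to a different shard than the user key they belong to.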
+func routeKey(key []byte) []byte { + if key == nil { + return nil + } + if user := store.ExtractListUserKey(key); user != nil { + return user + } + return key +} diff --git a/kv/shard_router.go b/kv/shard_router.go index 34038474..0b99f167 100644 --- a/kv/shard_router.go +++ b/kv/shard_router.go @@ -11,14 +11,19 @@ import ( ) // ShardRouter routes requests to multiple raft groups based on key ranges. -// It does not provide transactional guarantees across shards; commits are executed -// per shard and failures may leave partial results. +// +// Cross-shard transactions are not supported. They require distributed +// coordination (for example, 2PC) to ensure atomicity. +// +// Non-transactional request batches may still partially succeed across shards. type ShardRouter struct { engine *distribution.Engine mu sync.RWMutex groups map[uint64]*routerGroup } +var ErrCrossShardTransactionNotSupported = errors.New("cross-shard transactions are not supported") + type routerGroup struct { tm Transactional store store.MVCCStore @@ -53,11 +58,31 @@ func (s *ShardRouter) Abort(reqs []*pb.Request) (*TransactionResponse, error) { } func (s *ShardRouter) process(reqs []*pb.Request, fn func(*routerGroup, []*pb.Request) (*TransactionResponse, error)) (*TransactionResponse, error) { + if len(reqs) == 0 { + return nil, errors.WithStack(ErrInvalidRequest) + } + grouped, err := s.groupRequests(reqs) if err != nil { return nil, errors.WithStack(err) } + if err := validateShardBatch(reqs[0], grouped); err != nil { + return nil, errors.WithStack(err) + } + + return s.processGrouped(grouped, fn) +} + +func validateShardBatch(first *pb.Request, grouped map[uint64][]*pb.Request) error { + // Avoid partial commits for transactional batches spanning shards. + if first.IsTxn && len(grouped) > 1 { + return ErrCrossShardTransactionNotSupported + } + return nil +} + +func (s *ShardRouter) processGrouped(grouped map[uint64][]*pb.Request, fn func(*routerGroup, []*pb.Request) (*TransactionResponse, error)) (*TransactionResponse, error) { var firstErr error var maxIndex uint64 for gid, rs := range grouped { @@ -92,10 +117,16 @@ func (s *ShardRouter) getGroup(id uint64) (*routerGroup, bool) { func (s *ShardRouter) groupRequests(reqs []*pb.Request) (map[uint64][]*pb.Request, error) { batches := make(map[uint64][]*pb.Request) for _, r := range reqs { - if len(r.Mutations) == 0 { + if r == nil { + return nil, ErrInvalidRequest + } + if len(r.Mutations) == 0 || r.Mutations[0] == nil { + return nil, ErrInvalidRequest + } + key := routeKey(r.Mutations[0].Key) + if len(key) == 0 { return nil, ErrInvalidRequest } - key := r.Mutations[0].Key route, ok := s.engine.GetRoute(key) if !ok { return nil, errors.Wrapf(ErrInvalidRequest, "no route for key %q", key) @@ -107,7 +138,7 @@ func (s *ShardRouter) groupRequests(reqs []*pb.Request) (map[uint64][]*pb.Reques // Get retrieves a key routed to the correct shard. 
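+// The lookup key is normalized via routeKey so internal list keys follow their user key's route.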
func (s *ShardRouter) Get(ctx context.Context, key []byte) ([]byte, error) { - route, ok := s.engine.GetRoute(key) + route, ok := s.engine.GetRoute(routeKey(key)) if !ok { return nil, errors.Wrapf(ErrInvalidRequest, "no route for key %q", key) } diff --git a/kv/shard_router_test.go b/kv/shard_router_test.go index 7016e4df..a4e855eb 100644 --- a/kv/shard_router_test.go +++ b/kv/shard_router_test.go @@ -274,3 +274,31 @@ func TestShardRouterCommitFailure(t *testing.T) { t.Fatalf("unexpected abort on successful group") } } + +func TestShardRouterRoutesListKeys(t *testing.T) { + e := distribution.NewEngine() + e.UpdateRoute([]byte("a"), []byte("m"), 1) + e.UpdateRoute([]byte("m"), nil, 2) + + router := NewShardRouter(e) + + ok := &fakeTM{} + fail := &fakeTM{} + router.Register(1, ok, nil) + router.Register(2, fail, nil) + + listMetaKey := store.ListMetaKey([]byte("b")) + reqs := []*pb.Request{ + {IsTxn: false, Phase: pb.Phase_NONE, Mutations: []*pb.Mutation{{Op: pb.Op_PUT, Key: listMetaKey, Value: []byte("v")}}}, + } + + if _, err := router.Commit(reqs); err != nil { + t.Fatalf("commit: %v", err) + } + if ok.commitCalls != 1 { + t.Fatalf("expected commit routed to group1") + } + if fail.commitCalls != 0 { + t.Fatalf("unexpected commit on group2") + } +} diff --git a/kv/shard_store.go b/kv/shard_store.go new file mode 100644 index 00000000..dd3b7d9d --- /dev/null +++ b/kv/shard_store.go @@ -0,0 +1,446 @@ +package kv + +import ( + "bytes" + "context" + "io" + "sort" + + "github.com/bootjp/elastickv/distribution" + pb "github.com/bootjp/elastickv/proto" + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" +) + +// ShardStore routes MVCC reads to shard-specific stores and proxies to leaders when needed. +type ShardStore struct { + engine *distribution.Engine + groups map[uint64]*ShardGroup + + connCache GRPCConnCache +} + +var ErrCrossShardMutationBatchNotSupported = errors.New("cross-shard mutation batches are not supported") + +// NewShardStore creates a sharded MVCC store wrapper. +func NewShardStore(engine *distribution.Engine, groups map[uint64]*ShardGroup) *ShardStore { + return &ShardStore{ + engine: engine, + groups: groups, + } +} + +func (s *ShardStore) GetAt(ctx context.Context, key []byte, ts uint64) ([]byte, error) { + g, ok := s.groupForKey(key) + if !ok || g.Store == nil { + return nil, store.ErrKeyNotFound + } + + // Some tests use ShardStore without raft; in that case serve reads locally. + if g.Raft == nil { + val, err := g.Store.GetAt(ctx, key, ts) + if err != nil { + return nil, errors.WithStack(err) + } + return val, nil + } + + // Verify leadership with a quorum before serving reads from local state to + // avoid stale results from a deposed leader. 
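+ // State() is a cheap local check; VerifyLeader() then confirms with a quorum before local data is trusted.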
+ if g.Raft.State() == raft.Leader { + if err := g.Raft.VerifyLeader().Error(); err == nil { + val, err := g.Store.GetAt(ctx, key, ts) + if err != nil { + return nil, errors.WithStack(err) + } + return val, nil + } + } + return s.proxyRawGet(ctx, g, key, ts) +} + +func (s *ShardStore) ExistsAt(ctx context.Context, key []byte, ts uint64) (bool, error) { + v, err := s.GetAt(ctx, key, ts) + if err != nil { + if errors.Is(err, store.ErrKeyNotFound) { + return false, nil + } + return false, err + } + return v != nil, nil +} + +func (s *ShardStore) ScanAt(ctx context.Context, start []byte, end []byte, limit int, ts uint64) ([]*store.KVPair, error) { + if limit <= 0 { + return []*store.KVPair{}, nil + } + + routes, clampToRoutes := s.routesForScan(start, end) + out, err := s.scanRoutesAt(ctx, routes, start, end, limit, ts, clampToRoutes) + if err != nil { + return nil, err + } + + sort.Slice(out, func(i, j int) bool { + return bytes.Compare(out[i].Key, out[j].Key) < 0 + }) + if len(out) > limit { + out = out[:limit] + } + return out, nil +} + +func (s *ShardStore) routesForScan(start []byte, end []byte) ([]distribution.Route, bool) { + // For internal list keys, shard routing is based on the logical user key + // rather than the raw key prefix. + if userKey := store.ExtractListUserKey(start); userKey != nil { + route, ok := s.engine.GetRoute(userKey) + if !ok { + return []distribution.Route{}, false + } + return []distribution.Route{route}, false + } + + routes := s.engine.GetIntersectingRoutes(start, end) + // If the scan can include internal list keys (which use a fixed prefix), + // avoid clamping to shard range bounds because those keys may be ordered + // before the shard range start in raw keyspace. + if len(start) == 0 { + return routes, false + } + + return routes, true +} + +func (s *ShardStore) scanRoutesAt(ctx context.Context, routes []distribution.Route, start []byte, end []byte, limit int, ts uint64, clampToRoutes bool) ([]*store.KVPair, error) { + out := make([]*store.KVPair, 0) + for _, route := range routes { + scanStart := start + scanEnd := end + if clampToRoutes { + scanStart = clampScanStart(start, route.Start) + scanEnd = clampScanEnd(end, route.End) + } + + // Fetch up to 'limit' items from each shard. The final result will be + // sorted and truncated by ScanAt. + kvs, err := s.scanRouteAt(ctx, route, scanStart, scanEnd, limit, ts) + if err != nil { + return nil, err + } + out = append(out, kvs...) + } + return out, nil +} + +func (s *ShardStore) scanRouteAt(ctx context.Context, route distribution.Route, start []byte, end []byte, limit int, ts uint64) ([]*store.KVPair, error) { + g, ok := s.groupForID(route.GroupID) + if !ok || g == nil || g.Store == nil { + return nil, nil + } + + if g.Raft == nil { + kvs, err := g.Store.ScanAt(ctx, start, end, limit, ts) + if err != nil { + return nil, errors.WithStack(err) + } + return kvs, nil + } + + // Reads should come from the shard's leader to avoid returning stale or + // incomplete results when this node is a follower for a given shard. 
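+ // If verification fails, fall through to the leader proxy rather than returning an error.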
+ if g.Raft.State() == raft.Leader { + if err := g.Raft.VerifyLeader().Error(); err == nil { + kvs, err := g.Store.ScanAt(ctx, start, end, limit, ts) + if err != nil { + return nil, errors.WithStack(err) + } + return kvs, nil + } + } + + kvs, err := s.proxyRawScanAt(ctx, g, start, end, limit, ts) + if err != nil { + return nil, err + } + return kvs, nil +} + +func (s *ShardStore) groupForID(groupID uint64) (*ShardGroup, bool) { + g, ok := s.groups[groupID] + return g, ok +} + +func clampScanStart(start []byte, routeStart []byte) []byte { + if start == nil { + return routeStart + } + if bytes.Compare(start, routeStart) < 0 { + return routeStart + } + return start +} + +func clampScanEnd(end []byte, routeEnd []byte) []byte { + if routeEnd == nil { + return end + } + if end == nil { + return routeEnd + } + if bytes.Compare(end, routeEnd) > 0 { + return routeEnd + } + return end +} + +func (s *ShardStore) PutAt(ctx context.Context, key []byte, value []byte, commitTS uint64, expireAt uint64) error { + g, ok := s.groupForKey(key) + if !ok || g.Store == nil { + return store.ErrNotSupported + } + return errors.WithStack(g.Store.PutAt(ctx, key, value, commitTS, expireAt)) +} + +func (s *ShardStore) DeleteAt(ctx context.Context, key []byte, commitTS uint64) error { + g, ok := s.groupForKey(key) + if !ok || g.Store == nil { + return store.ErrNotSupported + } + return errors.WithStack(g.Store.DeleteAt(ctx, key, commitTS)) +} + +func (s *ShardStore) PutWithTTLAt(ctx context.Context, key []byte, value []byte, commitTS uint64, expireAt uint64) error { + g, ok := s.groupForKey(key) + if !ok || g.Store == nil { + return store.ErrNotSupported + } + return errors.WithStack(g.Store.PutWithTTLAt(ctx, key, value, commitTS, expireAt)) +} + +func (s *ShardStore) ExpireAt(ctx context.Context, key []byte, expireAt uint64, commitTS uint64) error { + g, ok := s.groupForKey(key) + if !ok || g.Store == nil { + return store.ErrNotSupported + } + return errors.WithStack(g.Store.ExpireAt(ctx, key, expireAt, commitTS)) +} + +func (s *ShardStore) LatestCommitTS(ctx context.Context, key []byte) (uint64, bool, error) { + g, ok := s.groupForKey(key) + if !ok || g.Store == nil { + return 0, false, nil + } + + if g.Raft == nil { + ts, exists, err := g.Store.LatestCommitTS(ctx, key) + if err != nil { + return 0, false, errors.WithStack(err) + } + return ts, exists, nil + } + + // Avoid returning a stale watermark when our local raft instance is a + // deposed leader. + if g.Raft.State() == raft.Leader { + if err := g.Raft.VerifyLeader().Error(); err == nil { + ts, exists, err := g.Store.LatestCommitTS(ctx, key) + if err != nil { + return 0, false, errors.WithStack(err) + } + return ts, exists, nil + } + } + + return s.proxyLatestCommitTS(ctx, g, key) +} + +func (s *ShardStore) proxyLatestCommitTS(ctx context.Context, g *ShardGroup, key []byte) (uint64, bool, error) { + if g == nil || g.Raft == nil { + return 0, false, nil + } + addr, _ := g.Raft.LeaderWithID() + if addr == "" { + return 0, false, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := s.connCache.ConnFor(addr) + if err != nil { + return 0, false, err + } + + cli := pb.NewRawKVClient(conn) + resp, err := cli.RawLatestCommitTS(ctx, &pb.RawLatestCommitTSRequest{Key: key}) + if err != nil { + return 0, false, errors.WithStack(err) + } + return resp.Ts, resp.Exists, nil +} + +// ApplyMutations applies a batch of mutations to the correct shard store. +// +// All mutations must belong to the same shard. Cross-shard mutation batches are +// not supported. 
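+// Callers that need multi-shard writes must split the batch per shard themselves.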
+func (s *ShardStore) ApplyMutations(ctx context.Context, mutations []*store.KVPairMutation, startTS, commitTS uint64) error { + if len(mutations) == 0 { + return nil + } + // Determine the shard group for the first mutation. + firstGroup, ok := s.groupForKey(mutations[0].Key) + if !ok || firstGroup == nil || firstGroup.Store == nil { + return store.ErrNotSupported + } + // Ensure that all mutations in the batch belong to the same shard. + for i := 1; i < len(mutations); i++ { + g, ok := s.groupForKey(mutations[i].Key) + if !ok || g == nil || g.Store == nil { + return store.ErrNotSupported + } + if g != firstGroup { + return errors.WithStack(ErrCrossShardMutationBatchNotSupported) + } + } + return errors.WithStack(firstGroup.Store.ApplyMutations(ctx, mutations, startTS, commitTS)) +} + +func (s *ShardStore) LastCommitTS() uint64 { + var max uint64 + for _, g := range s.groups { + if g == nil || g.Store == nil { + continue + } + if ts := g.Store.LastCommitTS(); ts > max { + max = ts + } + } + return max +} + +func (s *ShardStore) Compact(ctx context.Context, minTS uint64) error { + for _, g := range s.groups { + if g == nil || g.Store == nil { + continue + } + if err := g.Store.Compact(ctx, minTS); err != nil { + return errors.WithStack(err) + } + } + return nil +} + +func (s *ShardStore) Snapshot() (io.ReadWriter, error) { + return nil, store.ErrNotSupported +} + +func (s *ShardStore) Restore(_ io.Reader) error { + return store.ErrNotSupported +} + +func (s *ShardStore) Close() error { + var first error + for _, g := range s.groups { + if err := s.closeGroup(g); err != nil && first == nil { + first = err + } + } + + if err := s.connCache.Close(); err != nil && first == nil { + first = err + } + + return first +} + +func (s *ShardStore) closeGroup(g *ShardGroup) error { + if g == nil { + return nil + } + + var first error + if g.Store != nil { + if err := g.Store.Close(); err != nil && first == nil { + first = errors.WithStack(err) + } + } + + if closer, ok := g.Txn.(io.Closer); ok { + if err := closer.Close(); err != nil && first == nil { + first = errors.WithStack(err) + } + } + + return first +} + +func (s *ShardStore) groupForKey(key []byte) (*ShardGroup, bool) { + route, ok := s.engine.GetRoute(routeKey(key)) + if !ok { + return nil, false + } + g, ok := s.groups[route.GroupID] + return g, ok +} + +func (s *ShardStore) proxyRawGet(ctx context.Context, g *ShardGroup, key []byte, ts uint64) ([]byte, error) { + if g == nil || g.Raft == nil { + return nil, store.ErrKeyNotFound + } + addr, _ := g.Raft.LeaderWithID() + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := s.connCache.ConnFor(addr) + if err != nil { + return nil, err + } + + cli := pb.NewRawKVClient(conn) + resp, err := cli.RawGet(ctx, &pb.RawGetRequest{Key: key, Ts: ts}) + if err != nil { + return nil, errors.WithStack(err) + } + if resp.Value == nil { + return nil, store.ErrKeyNotFound + } + return resp.Value, nil +} + +func (s *ShardStore) proxyRawScanAt(ctx context.Context, g *ShardGroup, start []byte, end []byte, limit int, ts uint64) ([]*store.KVPair, error) { + if g == nil || g.Raft == nil { + return nil, store.ErrNotSupported + } + addr, _ := g.Raft.LeaderWithID() + if addr == "" { + return nil, errors.WithStack(ErrLeaderNotFound) + } + + conn, err := s.connCache.ConnFor(addr) + if err != nil { + return nil, err + } + + cli := pb.NewRawKVClient(conn) + resp, err := cli.RawScanAt(ctx, &pb.RawScanAtRequest{ + StartKey: start, + EndKey: end, + Limit: int64(limit), + Ts: ts, + }) + if err 
!= nil { + return nil, errors.WithStack(err) + } + + out := make([]*store.KVPair, 0, len(resp.Kv)) + for _, kvp := range resp.Kv { + out = append(out, &store.KVPair{ + Key: bytes.Clone(kvp.Key), + Value: bytes.Clone(kvp.Value), + }) + } + + return out, nil +} + +var _ store.MVCCStore = (*ShardStore)(nil) diff --git a/kv/shard_store_test.go b/kv/shard_store_test.go new file mode 100644 index 00000000..23b7331f --- /dev/null +++ b/kv/shard_store_test.go @@ -0,0 +1,71 @@ +package kv + +import ( + "context" + "testing" + + "github.com/bootjp/elastickv/distribution" + "github.com/bootjp/elastickv/store" + "github.com/stretchr/testify/require" +) + +func TestShardStoreScanAt_IncludesListKeysAcrossShards(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + engine := distribution.NewEngine() + engine.UpdateRoute([]byte(""), []byte("m"), 1) + engine.UpdateRoute([]byte("m"), nil, 2) + + groups := map[uint64]*ShardGroup{ + 1: {Store: store.NewMVCCStore()}, + 2: {Store: store.NewMVCCStore()}, + } + st := NewShardStore(engine, groups) + + require.NoError(t, st.PutAt(ctx, []byte("a"), []byte("va"), 1, 0)) + require.NoError(t, st.PutAt(ctx, []byte("b"), []byte("vb"), 2, 0)) + + userKey := []byte("x") + itemKey := store.ListItemKey(userKey, 0) + require.NoError(t, st.PutAt(ctx, itemKey, []byte("v0"), 3, 0)) + + // A full scan should surface internal list keys that may live on any shard. + kvs, err := st.ScanAt(ctx, []byte(""), nil, 1, ^uint64(0)) + require.NoError(t, err) + require.Len(t, kvs, 1) + require.Equal(t, itemKey, kvs[0].Key) +} + +func TestShardStoreScanAt_RoutesListItemScansByUserKey(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + engine := distribution.NewEngine() + engine.UpdateRoute([]byte(""), []byte("m"), 1) + engine.UpdateRoute([]byte("m"), nil, 2) + + groups := map[uint64]*ShardGroup{ + 1: {Store: store.NewMVCCStore()}, + 2: {Store: store.NewMVCCStore()}, + } + st := NewShardStore(engine, groups) + + userKey := []byte("x") // routes to group 2 + k0 := store.ListItemKey(userKey, 0) + k1 := store.ListItemKey(userKey, 1) + k2 := store.ListItemKey(userKey, 2) + require.NoError(t, st.PutAt(ctx, k0, []byte("v0"), 1, 0)) + require.NoError(t, st.PutAt(ctx, k1, []byte("v1"), 2, 0)) + require.NoError(t, st.PutAt(ctx, k2, []byte("v2"), 3, 0)) + + end := store.ListItemKey(userKey, 3) // exclusive upper bound + kvs, err := st.ScanAt(ctx, k0, end, 10, ^uint64(0)) + require.NoError(t, err) + require.Len(t, kvs, 3) + require.Equal(t, k0, kvs[0].Key) + require.Equal(t, k1, kvs[1].Key) + require.Equal(t, k2, kvs[2].Key) +} diff --git a/kv/sharded_coordinator.go b/kv/sharded_coordinator.go new file mode 100644 index 00000000..009d20f7 --- /dev/null +++ b/kv/sharded_coordinator.go @@ -0,0 +1,278 @@ +package kv + +import ( + "context" + "sort" + + "github.com/bootjp/elastickv/distribution" + pb "github.com/bootjp/elastickv/proto" + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" +) + +type ShardGroup struct { + Raft *raft.Raft + Store store.MVCCStore + Txn Transactional +} + +const txnPhaseCount = 2 + +// ShardedCoordinator routes operations to shard-specific raft groups. +// It issues timestamps via a shared HLC and uses ShardRouter to dispatch. +type ShardedCoordinator struct { + engine *distribution.Engine + router *ShardRouter + groups map[uint64]*ShardGroup + defaultGroup uint64 + clock *HLC + store store.MVCCStore +} + +// NewShardedCoordinator builds a coordinator for the provided shard groups. 
+// The defaultGroup is used for non-keyed leader checks. +func NewShardedCoordinator(engine *distribution.Engine, groups map[uint64]*ShardGroup, defaultGroup uint64, clock *HLC, st store.MVCCStore) *ShardedCoordinator { + router := NewShardRouter(engine) + for gid, g := range groups { + router.Register(gid, g.Txn, g.Store) + } + return &ShardedCoordinator{ + engine: engine, + router: router, + groups: groups, + defaultGroup: defaultGroup, + clock: clock, + store: st, + } +} + +func (c *ShardedCoordinator) Dispatch(ctx context.Context, reqs *OperationGroup[OP]) (*CoordinateResponse, error) { + if ctx == nil { + ctx = context.Background() + } + + if err := validateOperationGroup(reqs); err != nil { + return nil, err + } + + if reqs.IsTxn && reqs.StartTS == 0 { + startTS, err := c.nextStartTS(ctx, reqs.Elems) + if err != nil { + return nil, err + } + reqs.StartTS = startTS + } + + logs, err := c.requestLogs(reqs) + if err != nil { + return nil, err + } + + r, err := c.router.Commit(logs) + if err != nil { + return nil, errors.WithStack(err) + } + return &CoordinateResponse{CommitIndex: r.CommitIndex}, nil +} + +func (c *ShardedCoordinator) nextStartTS(ctx context.Context, elems []*Elem[OP]) (uint64, error) { + maxTS, err := c.maxLatestCommitTS(ctx, elems) + if err != nil { + return 0, err + } + if c.clock != nil && maxTS > 0 { + c.clock.Observe(maxTS) + } + if c.clock == nil { + return maxTS + 1, nil + } + return c.clock.Next(), nil +} + +func (c *ShardedCoordinator) maxLatestCommitTS(ctx context.Context, elems []*Elem[OP]) (uint64, error) { + if c.store == nil { + return 0, nil + } + + keys := make([][]byte, 0, len(elems)) + for _, e := range elems { + if e == nil || len(e.Key) == 0 { + continue + } + keys = append(keys, e.Key) + } + + return MaxLatestCommitTS(ctx, c.store, keys) +} + +func (c *ShardedCoordinator) IsLeader() bool { + g, ok := c.groups[c.defaultGroup] + if !ok || g.Raft == nil { + return false + } + return g.Raft.State() == raft.Leader +} + +func (c *ShardedCoordinator) VerifyLeader() error { + g, ok := c.groups[c.defaultGroup] + if !ok || g.Raft == nil { + return errors.WithStack(ErrLeaderNotFound) + } + return errors.WithStack(g.Raft.VerifyLeader().Error()) +} + +func (c *ShardedCoordinator) RaftLeader() raft.ServerAddress { + g, ok := c.groups[c.defaultGroup] + if !ok || g.Raft == nil { + return "" + } + addr, _ := g.Raft.LeaderWithID() + return addr +} + +func (c *ShardedCoordinator) IsLeaderForKey(key []byte) bool { + g, ok := c.groupForKey(key) + if !ok || g.Raft == nil { + return false + } + return g.Raft.State() == raft.Leader +} + +func (c *ShardedCoordinator) VerifyLeaderForKey(key []byte) error { + g, ok := c.groupForKey(key) + if !ok || g.Raft == nil { + return errors.WithStack(ErrLeaderNotFound) + } + return errors.WithStack(g.Raft.VerifyLeader().Error()) +} + +func (c *ShardedCoordinator) RaftLeaderForKey(key []byte) raft.ServerAddress { + g, ok := c.groupForKey(key) + if !ok || g.Raft == nil { + return "" + } + addr, _ := g.Raft.LeaderWithID() + return addr +} + +func (c *ShardedCoordinator) Clock() *HLC { + return c.clock +} + +func (c *ShardedCoordinator) groupForKey(key []byte) (*ShardGroup, bool) { + route, ok := c.engine.GetRoute(routeKey(key)) + if !ok { + return nil, false + } + g, ok := c.groups[route.GroupID] + return g, ok +} + +func (c *ShardedCoordinator) toRawRequest(req *Elem[OP]) *pb.Request { + switch req.Op { + case Put: + return &pb.Request{ + IsTxn: false, + Phase: pb.Phase_NONE, + Ts: c.clock.Next(), + Mutations: []*pb.Mutation{ + { + Op: 
pb.Op_PUT, + Key: req.Key, + Value: req.Value, + }, + }, + } + case Del: + return &pb.Request{ + IsTxn: false, + Phase: pb.Phase_NONE, + Ts: c.clock.Next(), + Mutations: []*pb.Mutation{ + { + Op: pb.Op_DEL, + Key: req.Key, + }, + }, + } + } + panic("unreachable") +} + +var _ Coordinator = (*ShardedCoordinator)(nil) + +func validateOperationGroup(reqs *OperationGroup[OP]) error { + if reqs == nil || len(reqs.Elems) == 0 { + return ErrInvalidRequest + } + for _, e := range reqs.Elems { + if e == nil { + return ErrInvalidRequest + } + } + return nil +} + +func (c *ShardedCoordinator) requestLogs(reqs *OperationGroup[OP]) ([]*pb.Request, error) { + if reqs.IsTxn { + return c.txnLogs(reqs) + } + return c.rawLogs(reqs), nil +} + +func (c *ShardedCoordinator) rawLogs(reqs *OperationGroup[OP]) []*pb.Request { + logs := make([]*pb.Request, 0, len(reqs.Elems)) + for _, req := range reqs.Elems { + logs = append(logs, c.toRawRequest(req)) + } + return logs +} + +func (c *ShardedCoordinator) txnLogs(reqs *OperationGroup[OP]) ([]*pb.Request, error) { + grouped, gids, err := c.groupMutations(reqs.Elems) + if err != nil { + return nil, err + } + if len(gids) > 1 { + return nil, errors.Wrapf( + ErrCrossShardTransactionNotSupported, + "involved_shards=%v", + gids, + ) + } + return buildTxnLogs(reqs.StartTS, grouped, gids), nil +} + +func (c *ShardedCoordinator) groupMutations(reqs []*Elem[OP]) (map[uint64][]*pb.Mutation, []uint64, error) { + grouped := make(map[uint64][]*pb.Mutation) + for _, req := range reqs { + if req == nil { + return nil, nil, ErrInvalidRequest + } + mut := elemToMutation(req) + route, ok := c.engine.GetRoute(routeKey(mut.Key)) + if !ok { + return nil, nil, errors.Wrapf(ErrInvalidRequest, "no route for key %q", mut.Key) + } + grouped[route.GroupID] = append(grouped[route.GroupID], mut) + } + gids := make([]uint64, 0, len(grouped)) + for gid := range grouped { + gids = append(gids, gid) + } + sort.Slice(gids, func(i, j int) bool { return gids[i] < gids[j] }) + return grouped, gids, nil +} + +func buildTxnLogs(startTS uint64, grouped map[uint64][]*pb.Mutation, gids []uint64) []*pb.Request { + logs := make([]*pb.Request, 0, len(gids)*txnPhaseCount) + for _, gid := range gids { + muts := grouped[gid] + logs = append(logs, + &pb.Request{IsTxn: true, Phase: pb.Phase_PREPARE, Ts: startTS, Mutations: muts}, + &pb.Request{IsTxn: true, Phase: pb.Phase_COMMIT, Ts: startTS, Mutations: muts}, + ) + } + return logs +} diff --git a/kv/sharded_integration_test.go b/kv/sharded_integration_test.go new file mode 100644 index 00000000..22989b8f --- /dev/null +++ b/kv/sharded_integration_test.go @@ -0,0 +1,151 @@ +package kv + +import ( + "context" + "testing" + "time" + + "github.com/bootjp/elastickv/distribution" + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" +) + +func newSingleRaft(t *testing.T, id string, fsm raft.FSM) (*raft.Raft, func()) { + t.Helper() + + addr, trans := raft.NewInmemTransport(raft.ServerAddress(id)) + c := raft.DefaultConfig() + c.LocalID = raft.ServerID(id) + c.HeartbeatTimeout = 50 * time.Millisecond + c.ElectionTimeout = 100 * time.Millisecond + c.LeaderLeaseTimeout = 50 * time.Millisecond + + ldb := raft.NewInmemStore() + sdb := raft.NewInmemStore() + fss := raft.NewInmemSnapshotStore() + r, err := raft.NewRaft(c, fsm, ldb, sdb, fss, trans) + if err != nil { + t.Fatalf("new raft: %v", err) + } + cfg := raft.Configuration{ + Servers: []raft.Server{ + { + Suffrage: raft.Voter, + ID: raft.ServerID(id), + Address: addr, + }, + 
}, + } + if err := r.BootstrapCluster(cfg).Error(); err != nil { + t.Fatalf("bootstrap: %v", err) + } + + for i := 0; i < 100; i++ { + if r.State() == raft.Leader { + break + } + time.Sleep(10 * time.Millisecond) + } + if r.State() != raft.Leader { + t.Fatalf("node %s is not leader", id) + } + + return r, func() { r.Shutdown() } +} + +func TestShardedCoordinatorDispatch(t *testing.T) { + ctx := context.Background() + + engine := distribution.NewEngine() + engine.UpdateRoute([]byte("a"), []byte("m"), 1) + engine.UpdateRoute([]byte("m"), nil, 2) + + s1 := store.NewMVCCStore() + r1, stop1 := newSingleRaft(t, "g1", NewKvFSM(s1)) + defer stop1() + + s2 := store.NewMVCCStore() + r2, stop2 := newSingleRaft(t, "g2", NewKvFSM(s2)) + defer stop2() + + groups := map[uint64]*ShardGroup{ + 1: {Raft: r1, Store: s1, Txn: NewLeaderProxy(r1)}, + 2: {Raft: r2, Store: s2, Txn: NewLeaderProxy(r2)}, + } + + shardStore := NewShardStore(engine, groups) + coord := NewShardedCoordinator(engine, groups, 1, NewHLC(), shardStore) + + ops := &OperationGroup[OP]{ + IsTxn: false, + Elems: []*Elem[OP]{ + {Op: Put, Key: []byte("b"), Value: []byte("v1")}, + {Op: Put, Key: []byte("x"), Value: []byte("v2")}, + }, + } + if _, err := coord.Dispatch(ctx, ops); err != nil { + t.Fatalf("dispatch: %v", err) + } + + readTS := shardStore.LastCommitTS() + v, err := shardStore.GetAt(ctx, []byte("b"), readTS) + if err != nil || string(v) != "v1" { + t.Fatalf("get b: %v %v", v, err) + } + v, err = shardStore.GetAt(ctx, []byte("x"), readTS) + if err != nil || string(v) != "v2" { + t.Fatalf("get x: %v %v", v, err) + } + + if _, err := s1.GetAt(ctx, []byte("x"), readTS); !errors.Is(err, store.ErrKeyNotFound) { + t.Fatalf("expected key x missing in group1, got %v", err) + } + if _, err := s2.GetAt(ctx, []byte("b"), readTS); !errors.Is(err, store.ErrKeyNotFound) { + t.Fatalf("expected key b missing in group2, got %v", err) + } +} + +func TestShardedCoordinatorDispatch_RejectsCrossShardTxn(t *testing.T) { + ctx := context.Background() + + engine := distribution.NewEngine() + engine.UpdateRoute([]byte("a"), []byte("m"), 1) + engine.UpdateRoute([]byte("m"), nil, 2) + + s1 := store.NewMVCCStore() + r1, stop1 := newSingleRaft(t, "g1", NewKvFSM(s1)) + defer stop1() + + s2 := store.NewMVCCStore() + r2, stop2 := newSingleRaft(t, "g2", NewKvFSM(s2)) + defer stop2() + + groups := map[uint64]*ShardGroup{ + 1: {Raft: r1, Store: s1, Txn: NewLeaderProxy(r1)}, + 2: {Raft: r2, Store: s2, Txn: NewLeaderProxy(r2)}, + } + + shardStore := NewShardStore(engine, groups) + coord := NewShardedCoordinator(engine, groups, 1, NewHLC(), shardStore) + + ops := &OperationGroup[OP]{ + IsTxn: true, + Elems: []*Elem[OP]{ + {Op: Put, Key: []byte("b"), Value: []byte("v1")}, + {Op: Put, Key: []byte("x"), Value: []byte("v2")}, + }, + } + if _, err := coord.Dispatch(ctx, ops); err == nil || !errors.Is(err, ErrCrossShardTransactionNotSupported) { + t.Fatalf("expected ErrCrossShardTransactionNotSupported, got %v", err) + } + + // Ensure the rejected transaction didn't write anything. 
+ readTS := ^uint64(0) + if _, err := shardStore.GetAt(ctx, []byte("b"), readTS); !errors.Is(err, store.ErrKeyNotFound) { + t.Fatalf("expected key b missing, got %v", err) + } + if _, err := shardStore.GetAt(ctx, []byte("x"), readTS); !errors.Is(err, store.ErrKeyNotFound) { + t.Fatalf("expected key x missing, got %v", err) + } +} diff --git a/main.go b/main.go index 0013cb13..0c56048c 100644 --- a/main.go +++ b/main.go @@ -3,26 +3,21 @@ package main import ( "context" "flag" - "fmt" "log" "net" - "os" - "path/filepath" "time" "github.com/Jille/raft-grpc-leader-rpc/leaderhealth" - transport "github.com/Jille/raft-grpc-transport" "github.com/Jille/raftadmin" "github.com/bootjp/elastickv/adapter" + "github.com/bootjp/elastickv/distribution" "github.com/bootjp/elastickv/kv" pb "github.com/bootjp/elastickv/proto" "github.com/bootjp/elastickv/store" "github.com/cockroachdb/errors" "github.com/hashicorp/raft" - boltdb "github.com/hashicorp/raft-boltdb/v2" "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/reflection" ) @@ -38,121 +33,198 @@ var ( raftId = flag.String("raftId", "", "Node id used by Raft") raftDir = flag.String("raftDataDir", "data/", "Raft data dir") raftBootstrap = flag.Bool("raftBootstrap", false, "Whether to bootstrap the Raft cluster") + raftGroups = flag.String("raftGroups", "", "Comma-separated raft groups (groupID=host:port,...)") + shardRanges = flag.String("shardRanges", "", "Comma-separated shard ranges (start:end=groupID,...)") + raftRedisMap = flag.String("raftRedisMap", "", "Map of Raft address to Redis address (raftAddr=redisAddr,...)") ) func main() { flag.Parse() + if err := run(); err != nil { + log.Fatalf("%v", err) + } +} + +func run() error { if *raftId == "" { - log.Fatalf("flag --raftId is required") + return errors.New("flag --raftId is required") } ctx := context.Background() var lc net.ListenConfig - _, port, err := net.SplitHostPort(*myAddr) + cfg, err := parseRuntimeConfig(*myAddr, *redisAddr, *raftGroups, *shardRanges, *raftRedisMap) if err != nil { - log.Fatalf("failed to parse local address (%q): %v", *myAddr, err) + return err } - grpcSock, err := lc.Listen(ctx, "tcp", fmt.Sprintf(":%s", port)) + runtimes, shardGroups, err := buildShardGroups(*raftId, *raftDir, cfg.groups, cfg.multi, *raftBootstrap) if err != nil { - log.Fatalf("failed to listen: %v", err) + return err } + defer func() { + for _, rt := range runtimes { + rt.Close() + } + }() - s := store.NewMVCCStore() - kvFSM := kv.NewKvFSM(s) + clock := kv.NewHLC() + shardStore := kv.NewShardStore(cfg.engine, shardGroups) + defer func() { _ = shardStore.Close() }() + coordinate := kv.NewShardedCoordinator(cfg.engine, shardGroups, cfg.defaultGroup, clock, shardStore) + distServer := adapter.NewDistributionServer(cfg.engine) - r, tm, err := NewRaft(ctx, *raftId, *myAddr, kvFSM) - if err != nil { - log.Fatalf("failed to start raft: %v", err) + eg := errgroup.Group{} + if err := startRaftServers(ctx, &lc, &eg, runtimes, shardStore, coordinate, distServer); err != nil { + return err } - - gs := grpc.NewServer() - trx := kv.NewTransaction(r) - coordinate := kv.NewCoordinator(trx, r) - pb.RegisterRawKVServer(gs, adapter.NewGRPCServer(s, coordinate)) - pb.RegisterTransactionalKVServer(gs, adapter.NewGRPCServer(s, coordinate)) - pb.RegisterInternalServer(gs, adapter.NewInternal(trx, r, coordinate.Clock())) - tm.Register(gs) - - leaderhealth.Setup(r, gs, []string{"RawKV", "Example"}) - raftadmin.Register(gs, r) - 
reflection.Register(gs) - - redisL, err := lc.Listen(ctx, "tcp", *redisAddr) - if err != nil { - log.Fatalf("failed to listen: %v", err) + if err := startRedisServer(ctx, &lc, &eg, *redisAddr, shardStore, coordinate, cfg.leaderRedis); err != nil { + return err } - leaderRedis := map[raft.ServerAddress]string{ - raft.ServerAddress(*myAddr): *redisAddr, - } - - eg := errgroup.Group{} - eg.Go(func() error { - return errors.WithStack(gs.Serve(grpcSock)) - }) - eg.Go(func() error { - return errors.WithStack(adapter.NewRedisServer(redisL, s, coordinate, leaderRedis).Run()) - }) - - err = eg.Wait() - if err != nil { - log.Fatalf("failed to serve: %v", err) + if err := eg.Wait(); err != nil { + return errors.Wrapf(err, "failed to serve") } + return nil } const snapshotRetainCount = 3 -func NewRaft(_ context.Context, myID, myAddress string, fsm raft.FSM) (*raft.Raft, *transport.Manager, error) { - c := raft.DefaultConfig() - c.LocalID = raft.ServerID(myID) - c.HeartbeatTimeout = heartbeatTimeout - c.ElectionTimeout = electionTimeout - c.LeaderLeaseTimeout = leaderLease - - baseDir := filepath.Join(*raftDir, myID) +type runtimeConfig struct { + groups []groupSpec + defaultGroup uint64 + engine *distribution.Engine + leaderRedis map[raft.ServerAddress]string + multi bool +} - ldb, err := boltdb.NewBoltStore(filepath.Join(baseDir, "logs.dat")) +func parseRuntimeConfig(myAddr, redisAddr, raftGroups, shardRanges, raftRedisMap string) (runtimeConfig, error) { + groups, err := parseRaftGroups(raftGroups, myAddr) if err != nil { - return nil, nil, errors.WithStack(err) + return runtimeConfig{}, errors.Wrapf(err, "failed to parse raft groups") } - - sdb, err := boltdb.NewBoltStore(filepath.Join(baseDir, "stable.dat")) + defaultGroup := defaultGroupID(groups) + ranges, err := parseShardRanges(shardRanges, defaultGroup) if err != nil { - return nil, nil, errors.WithStack(err) + return runtimeConfig{}, errors.Wrapf(err, "failed to parse shard ranges") + } + if err := validateShardRanges(ranges, groups); err != nil { + return runtimeConfig{}, errors.Wrapf(err, "invalid shard ranges") } - fss, err := raft.NewFileSnapshotStore(baseDir, snapshotRetainCount, os.Stderr) + engine := buildEngine(ranges) + leaderRedis, err := buildLeaderRedis(groups, redisAddr, raftRedisMap) if err != nil { - return nil, nil, errors.WithStack(err) + return runtimeConfig{}, errors.Wrapf(err, "failed to parse raft redis map") } - tm := transport.New(raft.ServerAddress(myAddress), []grpc.DialOption{ - grpc.WithTransportCredentials(insecure.NewCredentials()), - }) + return runtimeConfig{ + groups: groups, + defaultGroup: defaultGroup, + engine: engine, + leaderRedis: leaderRedis, + multi: len(groups) > 1, + }, nil +} - r, err := raft.NewRaft(c, fsm, ldb, sdb, fss, tm.Transport()) +func buildEngine(ranges []rangeSpec) *distribution.Engine { + engine := distribution.NewEngine() + for _, r := range ranges { + engine.UpdateRoute(r.start, r.end, r.groupID) + } + return engine +} + +func buildLeaderRedis(groups []groupSpec, redisAddr string, raftRedisMap string) (map[raft.ServerAddress]string, error) { + leaderRedis, err := parseRaftRedisMap(raftRedisMap) if err != nil { - return nil, nil, errors.WithStack(err) - } - - if *raftBootstrap { - cfg := raft.Configuration{ - Servers: []raft.Server{ - { - Suffrage: raft.Voter, - ID: raft.ServerID(myID), - Address: raft.ServerAddress(myAddress), - }, - }, + return nil, err + } + for _, g := range groups { + addr := raft.ServerAddress(g.address) + if _, ok := leaderRedis[addr]; !ok { + leaderRedis[addr] 
= redisAddr } - f := r.BootstrapCluster(cfg) - if err := f.Error(); err != nil { - return nil, nil, errors.WithStack(err) + } + return leaderRedis, nil +} + +func buildShardGroups(raftID string, raftDir string, groups []groupSpec, multi bool, bootstrap bool) ([]*raftGroupRuntime, map[uint64]*kv.ShardGroup, error) { + runtimes := make([]*raftGroupRuntime, 0, len(groups)) + shardGroups := make(map[uint64]*kv.ShardGroup, len(groups)) + for _, g := range groups { + st := store.NewMVCCStore() + fsm := kv.NewKvFSM(st) + r, tm, closeStores, err := newRaftGroup(raftID, g, raftDir, multi, bootstrap, fsm) + if err != nil { + for _, rt := range runtimes { + rt.Close() + } + if r != nil { + _ = r.Shutdown().Error() + } + if tm != nil { + _ = tm.Close() + } + _ = st.Close() + if closeStores != nil { + closeStores() + } + return nil, nil, errors.Wrapf(err, "failed to start raft group %d", g.id) } + runtimes = append(runtimes, &raftGroupRuntime{ + spec: g, + raft: r, + tm: tm, + store: st, + closeStores: closeStores, + }) + shardGroups[g.id] = &kv.ShardGroup{ + Raft: r, + Store: st, + Txn: kv.NewLeaderProxy(r), + } + } + return runtimes, shardGroups, nil +} + +func startRaftServers(ctx context.Context, lc *net.ListenConfig, eg *errgroup.Group, runtimes []*raftGroupRuntime, shardStore *kv.ShardStore, coordinate kv.Coordinator, distServer *adapter.DistributionServer) error { + for _, rt := range runtimes { + gs := grpc.NewServer() + trx := kv.NewTransaction(rt.raft) + grpcSvc := adapter.NewGRPCServer(shardStore, coordinate) + pb.RegisterRawKVServer(gs, grpcSvc) + pb.RegisterTransactionalKVServer(gs, grpcSvc) + pb.RegisterInternalServer(gs, adapter.NewInternal(trx, rt.raft, coordinate.Clock())) + pb.RegisterDistributionServer(gs, distServer) + rt.tm.Register(gs) + leaderhealth.Setup(rt.raft, gs, []string{"RawKV"}) + raftadmin.Register(gs, rt.raft) + reflection.Register(gs) + + grpcSock, err := lc.Listen(ctx, "tcp", rt.spec.address) + if err != nil { + return errors.Wrapf(err, "failed to listen on %s", rt.spec.address) + } + srv := gs + lis := grpcSock + grpcService := grpcSvc + eg.Go(func() error { + defer func() { _ = grpcService.Close() }() + return errors.WithStack(srv.Serve(lis)) + }) } + return nil +} - return r, tm, nil +func startRedisServer(ctx context.Context, lc *net.ListenConfig, eg *errgroup.Group, redisAddr string, shardStore *kv.ShardStore, coordinate kv.Coordinator, leaderRedis map[raft.ServerAddress]string) error { + redisL, err := lc.Listen(ctx, "tcp", redisAddr) + if err != nil { + return errors.Wrapf(err, "failed to listen on %s", redisAddr) + } + eg.Go(func() error { + return errors.WithStack(adapter.NewRedisServer(redisL, shardStore, coordinate, leaderRedis).Run()) + }) + return nil } diff --git a/multiraft_runtime.go b/multiraft_runtime.go new file mode 100644 index 00000000..d39b0b7a --- /dev/null +++ b/multiraft_runtime.go @@ -0,0 +1,148 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + transport "github.com/Jille/raft-grpc-transport" + "github.com/bootjp/elastickv/store" + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" + boltdb "github.com/hashicorp/raft-boltdb/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type raftGroupRuntime struct { + spec groupSpec + raft *raft.Raft + tm *transport.Manager + store store.MVCCStore + + closeStores func() +} + +func (r *raftGroupRuntime) Close() { + if r == nil { + return + } + if r.raft != nil { + _ = r.raft.Shutdown().Error() + r.raft = nil + } + if r.tm != nil { + _ = 
r.tm.Close() + r.tm = nil + } + if r.closeStores != nil { + r.closeStores() + r.closeStores = nil + } + if r.store != nil { + _ = r.store.Close() + r.store = nil + } +} + +func closeBoltStores(ldb, sdb **boltdb.BoltStore) { + if ldb == nil || sdb == nil { + return + } + if *ldb != nil { + _ = (*ldb).Close() + *ldb = nil + } + if *sdb != nil { + _ = (*sdb).Close() + *sdb = nil + } +} + +func closeTransportManager(tm **transport.Manager) { + if tm == nil || *tm == nil { + return + } + _ = (*tm).Close() + *tm = nil +} + +const raftDirPerm = 0o755 + +func groupDataDir(baseDir, raftID string, groupID uint64, multi bool) string { + if !multi { + return filepath.Join(baseDir, raftID) + } + return filepath.Join(baseDir, raftID, fmt.Sprintf("group-%d", groupID)) +} + +func newRaftGroup(raftID string, group groupSpec, baseDir string, multi bool, bootstrap bool, fsm raft.FSM) (*raft.Raft, *transport.Manager, func(), error) { + c := raft.DefaultConfig() + c.LocalID = raft.ServerID(raftID) + c.HeartbeatTimeout = heartbeatTimeout + c.ElectionTimeout = electionTimeout + c.LeaderLeaseTimeout = leaderLease + + dir := groupDataDir(baseDir, raftID, group.id, multi) + if err := os.MkdirAll(dir, raftDirPerm); err != nil { + return nil, nil, nil, errors.WithStack(err) + } + + var ldb *boltdb.BoltStore + var sdb *boltdb.BoltStore + var tm *transport.Manager + + closeStores := func() { closeBoltStores(&ldb, &sdb) } + cleanup := func() { + closeTransportManager(&tm) + closeStores() + } + + var err error + ldb, err = boltdb.NewBoltStore(filepath.Join(dir, "logs.dat")) + if err != nil { + return nil, nil, nil, errors.WithStack(err) + } + + sdb, err = boltdb.NewBoltStore(filepath.Join(dir, "stable.dat")) + if err != nil { + cleanup() + return nil, nil, nil, errors.WithStack(err) + } + + fss, err := raft.NewFileSnapshotStore(dir, snapshotRetainCount, os.Stderr) + if err != nil { + cleanup() + return nil, nil, nil, errors.WithStack(err) + } + + tm = transport.New(raft.ServerAddress(group.address), []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + }) + + r, err := raft.NewRaft(c, fsm, ldb, sdb, fss, tm.Transport()) + if err != nil { + cleanup() + return nil, nil, nil, errors.WithStack(err) + } + + if bootstrap { + cfg := raft.Configuration{ + Servers: []raft.Server{ + { + Suffrage: raft.Voter, + ID: raft.ServerID(raftID), + Address: raft.ServerAddress(group.address), + }, + }, + } + f := r.BootstrapCluster(cfg) + if err := f.Error(); err != nil { + _ = r.Shutdown().Error() + cleanup() + return nil, nil, nil, errors.WithStack(err) + } + } + + return r, tm, closeStores, nil +} diff --git a/multiraft_runtime_test.go b/multiraft_runtime_test.go new file mode 100644 index 00000000..e555cecb --- /dev/null +++ b/multiraft_runtime_test.go @@ -0,0 +1,65 @@ +package main + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/bootjp/elastickv/kv" + "github.com/bootjp/elastickv/store" + "github.com/hashicorp/raft" + "github.com/stretchr/testify/require" +) + +func TestGroupDataDir(t *testing.T) { + base := "/tmp/data" + raftID := "n1" + + t.Run("single", func(t *testing.T) { + require.Equal(t, filepath.Join(base, raftID), groupDataDir(base, raftID, 1, false)) + }) + + t.Run("multi", func(t *testing.T) { + require.Equal(t, filepath.Join(base, raftID, "group-2"), groupDataDir(base, raftID, 2, true)) + }) +} + +func TestNewRaftGroupBootstrap(t *testing.T) { + baseDir := t.TempDir() + + st := store.NewMVCCStore() + fsm := kv.NewKvFSM(st) + + r, tm, closeStores, err := newRaftGroup( + 
"n1", + groupSpec{id: 1, address: "127.0.0.1:0"}, + baseDir, + true, // multi + true, // bootstrap + fsm, + ) + require.NoError(t, err) + require.NotNil(t, r) + require.NotNil(t, tm) + t.Cleanup(func() { + _ = r.Shutdown().Error() + _ = tm.Close() + if closeStores != nil { + closeStores() + } + }) + + dir := groupDataDir(baseDir, "n1", 1, true) + _, err = os.Stat(dir) + require.NoError(t, err) + + _, err = os.Stat(filepath.Join(dir, "logs.dat")) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(dir, "stable.dat")) + require.NoError(t, err) + + require.Eventually(t, func() bool { + return r.State() == raft.Leader + }, 5*time.Second, 10*time.Millisecond) +} diff --git a/proto/distribution.pb.go b/proto/distribution.pb.go index 3d6c7799..0530f447 100644 --- a/proto/distribution.pb.go +++ b/proto/distribution.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.36.11 // protoc v3.21.12 // source: distribution.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,20 +22,17 @@ const ( ) type GetRouteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetRouteRequest) Reset() { *x = GetRouteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_distribution_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_distribution_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetRouteRequest) String() string { @@ -45,7 +43,7 @@ func (*GetRouteRequest) ProtoMessage() {} func (x *GetRouteRequest) ProtoReflect() protoreflect.Message { mi := &file_distribution_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -68,24 +66,21 @@ func (x *GetRouteRequest) GetKey() []byte { } type GetRouteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // start is inclusive and end is exclusive. A missing end denotes an // unbounded range extending to positive infinity. 
- Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` - RaftGroupId uint64 `protobuf:"varint,3,opt,name=raft_group_id,json=raftGroupId,proto3" json:"raft_group_id,omitempty"` + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` + RaftGroupId uint64 `protobuf:"varint,3,opt,name=raft_group_id,json=raftGroupId,proto3" json:"raft_group_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetRouteResponse) Reset() { *x = GetRouteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_distribution_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_distribution_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetRouteResponse) String() string { @@ -96,7 +91,7 @@ func (*GetRouteResponse) ProtoMessage() {} func (x *GetRouteResponse) ProtoReflect() protoreflect.Message { mi := &file_distribution_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -133,18 +128,16 @@ func (x *GetRouteResponse) GetRaftGroupId() uint64 { } type GetTimestampRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetTimestampRequest) Reset() { *x = GetTimestampRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_distribution_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_distribution_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTimestampRequest) String() string { @@ -155,7 +148,7 @@ func (*GetTimestampRequest) ProtoMessage() {} func (x *GetTimestampRequest) ProtoReflect() protoreflect.Message { mi := &file_distribution_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -171,20 +164,17 @@ func (*GetTimestampRequest) Descriptor() ([]byte, []int) { } type GetTimestampResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` unknownFields protoimpl.UnknownFields - - Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetTimestampResponse) Reset() { *x = GetTimestampResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_distribution_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_distribution_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTimestampResponse) String() string { @@ -195,7 +185,7 @@ func (*GetTimestampResponse) ProtoMessage() {} func (x *GetTimestampResponse) ProtoReflect() protoreflect.Message { mi := &file_distribution_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if 
x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -219,49 +209,36 @@ func (x *GetTimestampResponse) GetTimestamp() uint64 { var File_distribution_proto protoreflect.FileDescriptor -var file_distribution_proto_rawDesc = []byte{ - 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x5e, 0x0a, 0x10, 0x47, 0x65, 0x74, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x72, 0x61, - 0x66, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x34, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x32, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x12, 0x10, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x47, 0x65, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x15, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x23, 0x5a, 0x21, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x6a, 0x70, 0x2f, 0x65, - 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x76, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_distribution_proto_rawDesc = "" + + "\n" + + "\x12distribution.proto\"#\n" + + "\x0fGetRouteRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\"^\n" + + "\x10GetRouteResponse\x12\x14\n" + + "\x05start\x18\x01 \x01(\fR\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\fR\x03end\x12\"\n" + + "\rraft_group_id\x18\x03 \x01(\x04R\vraftGroupId\"\x15\n" + + "\x13GetTimestampRequest\"4\n" + + "\x14GetTimestampResponse\x12\x1c\n" + + "\ttimestamp\x18\x01 \x01(\x04R\ttimestamp2\x80\x01\n" + + "\fDistribution\x121\n" + + "\bGetRoute\x12\x10.GetRouteRequest\x1a\x11.GetRouteResponse\"\x00\x12=\n" + + "\fGetTimestamp\x12\x14.GetTimestampRequest\x1a\x15.GetTimestampResponse\"\x00B#Z!github.com/bootjp/elastickv/protob\x06proto3" var ( 
file_distribution_proto_rawDescOnce sync.Once - file_distribution_proto_rawDescData = file_distribution_proto_rawDesc + file_distribution_proto_rawDescData []byte ) func file_distribution_proto_rawDescGZIP() []byte { file_distribution_proto_rawDescOnce.Do(func() { - file_distribution_proto_rawDescData = protoimpl.X.CompressGZIP(file_distribution_proto_rawDescData) + file_distribution_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_distribution_proto_rawDesc), len(file_distribution_proto_rawDesc))) }) return file_distribution_proto_rawDescData } var file_distribution_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_distribution_proto_goTypes = []interface{}{ +var file_distribution_proto_goTypes = []any{ (*GetRouteRequest)(nil), // 0: GetRouteRequest (*GetRouteResponse)(nil), // 1: GetRouteResponse (*GetTimestampRequest)(nil), // 2: GetTimestampRequest @@ -284,61 +261,11 @@ func file_distribution_proto_init() { if File_distribution_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_distribution_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRouteRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_distribution_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRouteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_distribution_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTimestampRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_distribution_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTimestampResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_distribution_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_distribution_proto_rawDesc), len(file_distribution_proto_rawDesc)), NumEnums: 0, NumMessages: 4, NumExtensions: 0, @@ -349,7 +276,6 @@ func file_distribution_proto_init() { MessageInfos: file_distribution_proto_msgTypes, }.Build() File_distribution_proto = out.File - file_distribution_proto_rawDesc = nil file_distribution_proto_goTypes = nil file_distribution_proto_depIdxs = nil } diff --git a/proto/distribution_grpc.pb.go b/proto/distribution_grpc.pb.go index 4c85059b..a3dfa3cf 100644 --- a/proto/distribution_grpc.pb.go +++ b/proto/distribution_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.6.1 // - protoc v3.21.12 // source: distribution.proto @@ -15,8 +15,13 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + Distribution_GetRoute_FullMethodName = "/Distribution/GetRoute" + Distribution_GetTimestamp_FullMethodName = "/Distribution/GetTimestamp" +) // DistributionClient is the client API for Distribution service. // @@ -35,8 +40,9 @@ func NewDistributionClient(cc grpc.ClientConnInterface) DistributionClient { } func (c *distributionClient) GetRoute(ctx context.Context, in *GetRouteRequest, opts ...grpc.CallOption) (*GetRouteResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetRouteResponse) - err := c.cc.Invoke(ctx, "/Distribution/GetRoute", in, out, opts...) + err := c.cc.Invoke(ctx, Distribution_GetRoute_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -44,8 +50,9 @@ func (c *distributionClient) GetRoute(ctx context.Context, in *GetRouteRequest, } func (c *distributionClient) GetTimestamp(ctx context.Context, in *GetTimestampRequest, opts ...grpc.CallOption) (*GetTimestampResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(GetTimestampResponse) - err := c.cc.Invoke(ctx, "/Distribution/GetTimestamp", in, out, opts...) + err := c.cc.Invoke(ctx, Distribution_GetTimestamp_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -54,24 +61,28 @@ func (c *distributionClient) GetTimestamp(ctx context.Context, in *GetTimestampR // DistributionServer is the server API for Distribution service. // All implementations must embed UnimplementedDistributionServer -// for forward compatibility +// for forward compatibility. type DistributionServer interface { GetRoute(context.Context, *GetRouteRequest) (*GetRouteResponse, error) GetTimestamp(context.Context, *GetTimestampRequest) (*GetTimestampResponse, error) mustEmbedUnimplementedDistributionServer() } -// UnimplementedDistributionServer must be embedded to have forward compatible implementations. -type UnimplementedDistributionServer struct { -} +// UnimplementedDistributionServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedDistributionServer struct{} func (UnimplementedDistributionServer) GetRoute(context.Context, *GetRouteRequest) (*GetRouteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRoute not implemented") + return nil, status.Error(codes.Unimplemented, "method GetRoute not implemented") } func (UnimplementedDistributionServer) GetTimestamp(context.Context, *GetTimestampRequest) (*GetTimestampResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTimestamp not implemented") + return nil, status.Error(codes.Unimplemented, "method GetTimestamp not implemented") } func (UnimplementedDistributionServer) mustEmbedUnimplementedDistributionServer() {} +func (UnimplementedDistributionServer) testEmbeddedByValue() {} // UnsafeDistributionServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to DistributionServer will @@ -81,6 +92,13 @@ type UnsafeDistributionServer interface { } func RegisterDistributionServer(s grpc.ServiceRegistrar, srv DistributionServer) { + // If the following call panics, it indicates UnimplementedDistributionServer was + // embedded by pointer and is nil. 
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Distribution_ServiceDesc, srv) } @@ -94,7 +112,7 @@ func _Distribution_GetRoute_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Distribution/GetRoute", + FullMethod: Distribution_GetRoute_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DistributionServer).GetRoute(ctx, req.(*GetRouteRequest)) @@ -112,7 +130,7 @@ func _Distribution_GetTimestamp_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Distribution/GetTimestamp", + FullMethod: Distribution_GetTimestamp_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DistributionServer).GetTimestamp(ctx, req.(*GetTimestampRequest)) diff --git a/proto/internal.pb.go b/proto/internal.pb.go index cc0430ba..d2d4521f 100644 --- a/proto/internal.pb.go +++ b/proto/internal.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 -// protoc v5.29.3 -// source: proto/internal.proto +// protoc-gen-go v1.36.11 +// protoc v3.21.12 +// source: internal.proto package proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -51,11 +52,11 @@ func (x Op) String() string { } func (Op) Descriptor() protoreflect.EnumDescriptor { - return file_proto_internal_proto_enumTypes[0].Descriptor() + return file_internal_proto_enumTypes[0].Descriptor() } func (Op) Type() protoreflect.EnumType { - return &file_proto_internal_proto_enumTypes[0] + return &file_internal_proto_enumTypes[0] } func (x Op) Number() protoreflect.EnumNumber { @@ -64,7 +65,7 @@ func (x Op) Number() protoreflect.EnumNumber { // Deprecated: Use Op.Descriptor instead. func (Op) EnumDescriptor() ([]byte, []int) { - return file_proto_internal_proto_rawDescGZIP(), []int{0} + return file_internal_proto_rawDescGZIP(), []int{0} } type Phase int32 @@ -103,11 +104,11 @@ func (x Phase) String() string { } func (Phase) Descriptor() protoreflect.EnumDescriptor { - return file_proto_internal_proto_enumTypes[1].Descriptor() + return file_internal_proto_enumTypes[1].Descriptor() } func (Phase) Type() protoreflect.EnumType { - return &file_proto_internal_proto_enumTypes[1] + return &file_internal_proto_enumTypes[1] } func (x Phase) Number() protoreflect.EnumNumber { @@ -116,26 +117,23 @@ func (x Phase) Number() protoreflect.EnumNumber { // Deprecated: Use Phase.Descriptor instead. 
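// Illustrative annotation (not part of the generated file): given Phase values
// NONE, PREPARE, COMMIT and ABORT, a plausible transactional flow is two
// Requests sharing one ts: {is_txn: true, phase: PREPARE, mutations: [...]}
// followed by {phase: COMMIT}, or by {phase: ABORT} to roll back; writes with
// is_txn false leave phase as NONE.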
func (Phase) EnumDescriptor() ([]byte, []int) { - return file_proto_internal_proto_rawDescGZIP(), []int{1} + return file_internal_proto_rawDescGZIP(), []int{1} } type Mutation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=Op" json:"op,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Op Op `protobuf:"varint,1,opt,name=op,proto3,enum=Op" json:"op,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Mutation) Reset() { *x = Mutation{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_internal_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Mutation) String() string { @@ -145,8 +143,8 @@ func (x *Mutation) String() string { func (*Mutation) ProtoMessage() {} func (x *Mutation) ProtoReflect() protoreflect.Message { - mi := &file_proto_internal_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_internal_proto_msgTypes[0] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -158,7 +156,7 @@ func (x *Mutation) ProtoReflect() protoreflect.Message { // Deprecated: Use Mutation.ProtoReflect.Descriptor instead. func (*Mutation) Descriptor() ([]byte, []int) { - return file_proto_internal_proto_rawDescGZIP(), []int{0} + return file_internal_proto_rawDescGZIP(), []int{0} } func (x *Mutation) GetOp() Op { @@ -183,23 +181,20 @@ func (x *Mutation) GetValue() []byte { } type Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + IsTxn bool `protobuf:"varint,1,opt,name=is_txn,json=isTxn,proto3" json:"is_txn,omitempty"` + Phase Phase `protobuf:"varint,2,opt,name=phase,proto3,enum=Phase" json:"phase,omitempty"` + Ts uint64 `protobuf:"varint,3,opt,name=ts,proto3" json:"ts,omitempty"` + Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations,proto3" json:"mutations,omitempty"` unknownFields protoimpl.UnknownFields - - IsTxn bool `protobuf:"varint,1,opt,name=is_txn,json=isTxn,proto3" json:"is_txn,omitempty"` - Phase Phase `protobuf:"varint,2,opt,name=phase,proto3,enum=Phase" json:"phase,omitempty"` - Ts uint64 `protobuf:"varint,3,opt,name=ts,proto3" json:"ts,omitempty"` - Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations,proto3" json:"mutations,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Request) Reset() { *x = Request{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_internal_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Request) String() string { @@ -209,8 +204,8 @@ func (x *Request) String() string { func (*Request) ProtoMessage() {} func (x *Request) ProtoReflect() protoreflect.Message { - mi := &file_proto_internal_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := 
&file_internal_proto_msgTypes[1] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -222,7 +217,7 @@ func (x *Request) ProtoReflect() protoreflect.Message { // Deprecated: Use Request.ProtoReflect.Descriptor instead. func (*Request) Descriptor() ([]byte, []int) { - return file_proto_internal_proto_rawDescGZIP(), []int{1} + return file_internal_proto_rawDescGZIP(), []int{1} } func (x *Request) GetIsTxn() bool { @@ -254,21 +249,18 @@ func (x *Request) GetMutations() []*Mutation { } type ForwardRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + IsTxn bool `protobuf:"varint,1,opt,name=is_txn,json=isTxn,proto3" json:"is_txn,omitempty"` + Requests []*Request `protobuf:"bytes,2,rep,name=requests,proto3" json:"requests,omitempty"` unknownFields protoimpl.UnknownFields - - IsTxn bool `protobuf:"varint,1,opt,name=is_txn,json=isTxn,proto3" json:"is_txn,omitempty"` - Requests []*Request `protobuf:"bytes,2,rep,name=requests,proto3" json:"requests,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ForwardRequest) Reset() { *x = ForwardRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_internal_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ForwardRequest) String() string { @@ -278,8 +270,8 @@ func (x *ForwardRequest) String() string { func (*ForwardRequest) ProtoMessage() {} func (x *ForwardRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_internal_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_internal_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -291,7 +283,7 @@ func (x *ForwardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. 
func (*ForwardRequest) Descriptor() ([]byte, []int) { - return file_proto_internal_proto_rawDescGZIP(), []int{2} + return file_internal_proto_rawDescGZIP(), []int{2} } func (x *ForwardRequest) GetIsTxn() bool { @@ -310,21 +302,18 @@ func (x *ForwardRequest) GetRequests() []*Request { // for internal leader redirect type ForwardResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + CommitIndex uint64 `protobuf:"varint,2,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - CommitIndex uint64 `protobuf:"varint,2,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ForwardResponse) Reset() { *x = ForwardResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_internal_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_internal_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ForwardResponse) String() string { @@ -334,8 +323,8 @@ func (x *ForwardResponse) String() string { func (*ForwardResponse) ProtoMessage() {} func (x *ForwardResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_internal_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_internal_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -347,7 +336,7 @@ func (x *ForwardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ForwardResponse.ProtoReflect.Descriptor instead. 
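// Illustrative annotation (not part of the generated file): for leader
// redirect, a follower wraps pending writes as
// ForwardRequest{is_txn: ..., requests: [...]} and invokes Internal/Forward on
// the leader, which applies them and answers with success plus the raft
// commit_index at which they were committed.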
func (*ForwardResponse) Descriptor() ([]byte, []int) { - return file_proto_internal_proto_rawDescGZIP(), []int{3} + return file_internal_proto_rawDescGZIP(), []int{3} } func (x *ForwardResponse) GetSuccess() bool { @@ -364,61 +353,53 @@ func (x *ForwardResponse) GetCommitIndex() uint64 { return 0 } -var File_proto_internal_proto protoreflect.FileDescriptor - -var file_proto_internal_proto_rawDesc = []byte{ - 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x08, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x03, - 0x2e, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x77, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x73, - 0x5f, 0x74, 0x78, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x54, 0x78, - 0x6e, 0x12, 0x1c, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x06, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, - 0x27, 0x0a, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6d, - 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4d, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x73, - 0x5f, 0x74, 0x78, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x69, 0x73, 0x54, 0x78, - 0x6e, 0x12, 0x24, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x22, 0x4e, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2a, 0x16, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x07, 0x0a, - 0x03, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x45, 0x4c, 0x10, 0x01, 0x2a, - 0x35, 0x0a, 0x05, 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, - 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, - 0x42, 0x4f, 0x52, 0x54, 0x10, 0x03, 0x32, 0x3a, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x0f, 0x2e, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, - 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 
0x00, 0x42, 0x23, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x6a, 0x70, 0x2f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x6b, - 0x76, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +var File_internal_proto protoreflect.FileDescriptor + +const file_internal_proto_rawDesc = "" + + "\n" + + "\x0einternal.proto\"G\n" + + "\bMutation\x12\x13\n" + + "\x02op\x18\x01 \x01(\x0e2\x03.OpR\x02op\x12\x10\n" + + "\x03key\x18\x02 \x01(\fR\x03key\x12\x14\n" + + "\x05value\x18\x03 \x01(\fR\x05value\"w\n" + + "\aRequest\x12\x15\n" + + "\x06is_txn\x18\x01 \x01(\bR\x05isTxn\x12\x1c\n" + + "\x05phase\x18\x02 \x01(\x0e2\x06.PhaseR\x05phase\x12\x0e\n" + + "\x02ts\x18\x03 \x01(\x04R\x02ts\x12'\n" + + "\tmutations\x18\x04 \x03(\v2\t.MutationR\tmutations\"M\n" + + "\x0eForwardRequest\x12\x15\n" + + "\x06is_txn\x18\x01 \x01(\bR\x05isTxn\x12$\n" + + "\brequests\x18\x02 \x03(\v2\b.RequestR\brequests\"N\n" + + "\x0fForwardResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12!\n" + + "\fcommit_index\x18\x02 \x01(\x04R\vcommitIndex*\x16\n" + + "\x02Op\x12\a\n" + + "\x03PUT\x10\x00\x12\a\n" + + "\x03DEL\x10\x01*5\n" + + "\x05Phase\x12\b\n" + + "\x04NONE\x10\x00\x12\v\n" + + "\aPREPARE\x10\x01\x12\n" + + "\n" + + "\x06COMMIT\x10\x02\x12\t\n" + + "\x05ABORT\x10\x032:\n" + + "\bInternal\x12.\n" + + "\aForward\x12\x0f.ForwardRequest\x1a\x10.ForwardResponse\"\x00B#Z!github.com/bootjp/elastickv/protob\x06proto3" var ( - file_proto_internal_proto_rawDescOnce sync.Once - file_proto_internal_proto_rawDescData = file_proto_internal_proto_rawDesc + file_internal_proto_rawDescOnce sync.Once + file_internal_proto_rawDescData []byte ) -func file_proto_internal_proto_rawDescGZIP() []byte { - file_proto_internal_proto_rawDescOnce.Do(func() { - file_proto_internal_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_internal_proto_rawDescData) +func file_internal_proto_rawDescGZIP() []byte { + file_internal_proto_rawDescOnce.Do(func() { + file_internal_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_internal_proto_rawDesc), len(file_internal_proto_rawDesc))) }) - return file_proto_internal_proto_rawDescData + return file_internal_proto_rawDescData } -var file_proto_internal_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_proto_internal_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_proto_internal_proto_goTypes = []any{ +var file_internal_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_internal_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_internal_proto_goTypes = []any{ (Op)(0), // 0: Op (Phase)(0), // 1: Phase (*Mutation)(nil), // 2: Mutation @@ -426,7 +407,7 @@ var file_proto_internal_proto_goTypes = []any{ (*ForwardRequest)(nil), // 4: ForwardRequest (*ForwardResponse)(nil), // 5: ForwardResponse } -var file_proto_internal_proto_depIdxs = []int32{ +var file_internal_proto_depIdxs = []int32{ 0, // 0: Mutation.op:type_name -> Op 1, // 1: Request.phase:type_name -> Phase 2, // 2: Request.mutations:type_name -> Mutation @@ -440,78 +421,27 @@ var file_proto_internal_proto_depIdxs = []int32{ 0, // [0:4] is the sub-list for field type_name } -func init() { file_proto_internal_proto_init() } -func file_proto_internal_proto_init() { - if File_proto_internal_proto != nil { +func init() { file_internal_proto_init() } +func file_internal_proto_init() { + if File_internal_proto != nil { return } - if !protoimpl.UnsafeEnabled { - 
file_proto_internal_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Mutation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_internal_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_internal_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ForwardRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_internal_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ForwardResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_proto_internal_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_internal_proto_rawDesc), len(file_internal_proto_rawDesc)), NumEnums: 2, NumMessages: 4, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_proto_internal_proto_goTypes, - DependencyIndexes: file_proto_internal_proto_depIdxs, - EnumInfos: file_proto_internal_proto_enumTypes, - MessageInfos: file_proto_internal_proto_msgTypes, + GoTypes: file_internal_proto_goTypes, + DependencyIndexes: file_internal_proto_depIdxs, + EnumInfos: file_internal_proto_enumTypes, + MessageInfos: file_internal_proto_msgTypes, }.Build() - File_proto_internal_proto = out.File - file_proto_internal_proto_rawDesc = nil - file_proto_internal_proto_goTypes = nil - file_proto_internal_proto_depIdxs = nil + File_internal_proto = out.File + file_internal_proto_goTypes = nil + file_internal_proto_depIdxs = nil } diff --git a/proto/internal_grpc.pb.go b/proto/internal_grpc.pb.go index 4ab75514..38c7d537 100644 --- a/proto/internal_grpc.pb.go +++ b/proto/internal_grpc.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v5.29.3 -// source: proto/internal.proto +// - protoc-gen-go-grpc v1.6.1 +// - protoc v3.21.12 +// source: internal.proto package proto @@ -15,8 +15,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Internal_Forward_FullMethodName = "/Internal/Forward" @@ -50,21 +50,25 @@ func (c *internalClient) Forward(ctx context.Context, in *ForwardRequest, opts . // InternalServer is the server API for Internal service. // All implementations must embed UnimplementedInternalServer -// for forward compatibility +// for forward compatibility. type InternalServer interface { // for internal leader redirect only Forward(context.Context, *ForwardRequest) (*ForwardResponse, error) mustEmbedUnimplementedInternalServer() } -// UnimplementedInternalServer must be embedded to have forward compatible implementations. -type UnimplementedInternalServer struct { -} +// UnimplementedInternalServer must be embedded to have +// forward compatible implementations. 
+// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedInternalServer struct{} func (UnimplementedInternalServer) Forward(context.Context, *ForwardRequest) (*ForwardResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Forward not implemented") + return nil, status.Error(codes.Unimplemented, "method Forward not implemented") } func (UnimplementedInternalServer) mustEmbedUnimplementedInternalServer() {} +func (UnimplementedInternalServer) testEmbeddedByValue() {} // UnsafeInternalServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to InternalServer will @@ -74,6 +78,13 @@ type UnsafeInternalServer interface { } func RegisterInternalServer(s grpc.ServiceRegistrar, srv InternalServer) { + // If the following call panics, it indicates UnimplementedInternalServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Internal_ServiceDesc, srv) } @@ -108,5 +119,5 @@ var Internal_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "proto/internal.proto", + Metadata: "internal.proto", } diff --git a/proto/service.pb.go b/proto/service.pb.go index 795a34e0..cc39a904 100644 --- a/proto/service.pb.go +++ b/proto/service.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 -// protoc v5.29.3 -// source: proto/service.proto +// protoc-gen-go v1.36.11 +// protoc v3.21.12 +// source: service.proto package proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,21 +22,18 @@ const ( ) type RawPutRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RawPutRequest) Reset() { *x = RawPutRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawPutRequest) String() string { @@ -45,8 +43,8 @@ func (x *RawPutRequest) String() string { func (*RawPutRequest) ProtoMessage() {} func (x *RawPutRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[0] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -58,7 +56,7 @@ func (x *RawPutRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RawPutRequest.ProtoReflect.Descriptor instead. 
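// Illustrative annotation (not part of the generated file): assuming the
// generated client method mirrors the message name, a minimal write is
//   resp, err := client.RawPut(ctx, &pb.RawPutRequest{Key: []byte("k"), Value: []byte("v")})
// where resp.Success reports the outcome and resp.CommitIndex the raft index
// of the committed write.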
func (*RawPutRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{0} + return file_service_proto_rawDescGZIP(), []int{0} } func (x *RawPutRequest) GetKey() []byte { @@ -76,21 +74,18 @@ func (x *RawPutRequest) GetValue() []byte { } type RawPutResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` unknownFields protoimpl.UnknownFields - - CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RawPutResponse) Reset() { *x = RawPutResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawPutResponse) String() string { @@ -100,8 +95,8 @@ func (x *RawPutResponse) String() string { func (*RawPutResponse) ProtoMessage() {} func (x *RawPutResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[1] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -113,7 +108,7 @@ func (x *RawPutResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RawPutResponse.ProtoReflect.Descriptor instead. 
func (*RawPutResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{1} + return file_service_proto_rawDescGZIP(), []int{1} } func (x *RawPutResponse) GetCommitIndex() uint64 { @@ -131,21 +126,18 @@ func (x *RawPutResponse) GetSuccess() bool { } type RawGetRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Ts uint64 `protobuf:"varint,3,opt,name=ts,proto3" json:"ts,omitempty"` // optional read timestamp; if zero, server uses current HLC unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Ts uint64 `protobuf:"varint,3,opt,name=ts,proto3" json:"ts,omitempty"` // optional read timestamp; if zero, server uses current HLC + sizeCache protoimpl.SizeCache } func (x *RawGetRequest) Reset() { *x = RawGetRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawGetRequest) String() string { @@ -155,8 +147,8 @@ func (x *RawGetRequest) String() string { func (*RawGetRequest) ProtoMessage() {} func (x *RawGetRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -168,7 +160,7 @@ func (x *RawGetRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RawGetRequest.ProtoReflect.Descriptor instead. 
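// Illustrative annotation (not part of the generated file): a Ts of zero asks
// the server to read at its current HLC, so a repeatable snapshot read reuses
// a previously observed timestamp, e.g. (client method name assumed):
//   client.RawGet(ctx, &pb.RawGetRequest{Key: []byte("k"), Ts: observedTs})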
func (*RawGetRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{2} + return file_service_proto_rawDescGZIP(), []int{2} } func (x *RawGetRequest) GetKey() []byte { @@ -186,21 +178,18 @@ func (x *RawGetRequest) GetTs() uint64 { } type RawGetResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ReadAtIndex uint64 `protobuf:"varint,1,opt,name=read_at_index,json=readAtIndex,proto3" json:"read_at_index,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - ReadAtIndex uint64 `protobuf:"varint,1,opt,name=read_at_index,json=readAtIndex,proto3" json:"read_at_index,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RawGetResponse) Reset() { *x = RawGetResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawGetResponse) String() string { @@ -210,8 +199,8 @@ func (x *RawGetResponse) String() string { func (*RawGetResponse) ProtoMessage() {} func (x *RawGetResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -223,7 +212,7 @@ func (x *RawGetResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RawGetResponse.ProtoReflect.Descriptor instead. func (*RawGetResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{3} + return file_service_proto_rawDescGZIP(), []int{3} } func (x *RawGetResponse) GetReadAtIndex() uint64 { @@ -241,20 +230,17 @@ func (x *RawGetResponse) GetValue() []byte { } type RawDeleteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RawDeleteRequest) Reset() { *x = RawDeleteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawDeleteRequest) String() string { @@ -264,8 +250,8 @@ func (x *RawDeleteRequest) String() string { func (*RawDeleteRequest) ProtoMessage() {} func (x *RawDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -277,7 +263,7 @@ func (x *RawDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RawDeleteRequest.ProtoReflect.Descriptor instead. 
func (*RawDeleteRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{4} + return file_service_proto_rawDescGZIP(), []int{4} } func (x *RawDeleteRequest) GetKey() []byte { @@ -288,21 +274,18 @@ func (x *RawDeleteRequest) GetKey() []byte { } type RawDeleteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` unknownFields protoimpl.UnknownFields - - CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RawDeleteResponse) Reset() { *x = RawDeleteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawDeleteResponse) String() string { @@ -312,8 +295,8 @@ func (x *RawDeleteResponse) String() string { func (*RawDeleteResponse) ProtoMessage() {} func (x *RawDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,7 +308,7 @@ func (x *RawDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RawDeleteResponse.ProtoReflect.Descriptor instead. func (*RawDeleteResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{5} + return file_service_proto_rawDescGZIP(), []int{5} } func (x *RawDeleteResponse) GetCommitIndex() uint64 { @@ -342,22 +325,279 @@ func (x *RawDeleteResponse) GetSuccess() bool { return false } -type PutRequest struct { - state protoimpl.MessageState +type RawLatestCommitTSRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +} + +func (x *RawLatestCommitTSRequest) Reset() { + *x = RawLatestCommitTSRequest{} + mi := &file_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawLatestCommitTSRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawLatestCommitTSRequest) ProtoMessage() {} + +func (x *RawLatestCommitTSRequest) ProtoReflect() protoreflect.Message { + mi := &file_service_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawLatestCommitTSRequest.ProtoReflect.Descriptor instead. 
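+// Illustrative annotation (not part of the generated file): paired with the
+// RawLatestCommitTSResponse below, Exists distinguishes a key that has never
+// been committed (Exists false, Ts meaningless) from one last committed at Ts
+// (Exists true), which callers can use, for example, to detect write-write
+// conflicts before committing at a newer timestamp.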
+func (*RawLatestCommitTSRequest) Descriptor() ([]byte, []int) { + return file_service_proto_rawDescGZIP(), []int{6} +} + +func (x *RawLatestCommitTSRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +type RawLatestCommitTSResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ts uint64 `protobuf:"varint,1,opt,name=ts,proto3" json:"ts,omitempty"` + Exists bool `protobuf:"varint,2,opt,name=exists,proto3" json:"exists,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +func (x *RawLatestCommitTSResponse) Reset() { + *x = RawLatestCommitTSResponse{} + mi := &file_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *PutRequest) Reset() { - *x = PutRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[6] +func (x *RawLatestCommitTSResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawLatestCommitTSResponse) ProtoMessage() {} + +func (x *RawLatestCommitTSResponse) ProtoReflect() protoreflect.Message { + mi := &file_service_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawLatestCommitTSResponse.ProtoReflect.Descriptor instead. +func (*RawLatestCommitTSResponse) Descriptor() ([]byte, []int) { + return file_service_proto_rawDescGZIP(), []int{7} +} + +func (x *RawLatestCommitTSResponse) GetTs() uint64 { + if x != nil { + return x.Ts + } + return 0 +} + +func (x *RawLatestCommitTSResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +type RawScanAtRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + StartKey []byte `protobuf:"bytes,1,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,2,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` // validated against host int size; large values may be rejected + Ts uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"` // optional read timestamp; if zero, server uses current HLC + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawScanAtRequest) Reset() { + *x = RawScanAtRequest{} + mi := &file_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawScanAtRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawScanAtRequest) ProtoMessage() {} + +func (x *RawScanAtRequest) ProtoReflect() protoreflect.Message { + mi := &file_service_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawScanAtRequest.ProtoReflect.Descriptor instead. 
+func (*RawScanAtRequest) Descriptor() ([]byte, []int) { + return file_service_proto_rawDescGZIP(), []int{8} +} + +func (x *RawScanAtRequest) GetStartKey() []byte { + if x != nil { + return x.StartKey + } + return nil +} + +func (x *RawScanAtRequest) GetEndKey() []byte { + if x != nil { + return x.EndKey + } + return nil +} + +func (x *RawScanAtRequest) GetLimit() int64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *RawScanAtRequest) GetTs() uint64 { + if x != nil { + return x.Ts + } + return 0 +} + +type RawKVPair struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawKVPair) Reset() { + *x = RawKVPair{} + mi := &file_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawKVPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawKVPair) ProtoMessage() {} + +func (x *RawKVPair) ProtoReflect() protoreflect.Message { + mi := &file_service_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use RawKVPair.ProtoReflect.Descriptor instead. +func (*RawKVPair) Descriptor() ([]byte, []int) { + return file_service_proto_rawDescGZIP(), []int{9} +} + +func (x *RawKVPair) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *RawKVPair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type RawScanAtResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Kv []*RawKVPair `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RawScanAtResponse) Reset() { + *x = RawScanAtResponse{} + mi := &file_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RawScanAtResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RawScanAtResponse) ProtoMessage() {} + +func (x *RawScanAtResponse) ProtoReflect() protoreflect.Message { + mi := &file_service_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RawScanAtResponse.ProtoReflect.Descriptor instead. 
+func (*RawScanAtResponse) Descriptor() ([]byte, []int) { + return file_service_proto_rawDescGZIP(), []int{10} +} + +func (x *RawScanAtResponse) GetKv() []*RawKVPair { + if x != nil { + return x.Kv + } + return nil +} + +type PutRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PutRequest) Reset() { + *x = PutRequest{} + mi := &file_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PutRequest) String() string { @@ -367,8 +607,8 @@ func (x *PutRequest) String() string { func (*PutRequest) ProtoMessage() {} func (x *PutRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -380,7 +620,7 @@ func (x *PutRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PutRequest.ProtoReflect.Descriptor instead. func (*PutRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{6} + return file_service_proto_rawDescGZIP(), []int{11} } func (x *PutRequest) GetKey() []byte { @@ -398,21 +638,18 @@ func (x *PutRequest) GetValue() []byte { } type PutResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` unknownFields protoimpl.UnknownFields - - CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PutResponse) Reset() { *x = PutResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PutResponse) String() string { @@ -422,8 +659,8 @@ func (x *PutResponse) String() string { func (*PutResponse) ProtoMessage() {} func (x *PutResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -435,7 +672,7 @@ func (x *PutResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PutResponse.ProtoReflect.Descriptor instead. 
func (*PutResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{7} + return file_service_proto_rawDescGZIP(), []int{12} } func (x *PutResponse) GetCommitIndex() uint64 { @@ -453,20 +690,17 @@ func (x *PutResponse) GetSuccess() bool { } type DeleteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DeleteRequest) Reset() { *x = DeleteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeleteRequest) String() string { @@ -476,8 +710,8 @@ func (x *DeleteRequest) String() string { func (*DeleteRequest) ProtoMessage() {} func (x *DeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[13] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -489,7 +723,7 @@ func (x *DeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. func (*DeleteRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{8} + return file_service_proto_rawDescGZIP(), []int{13} } func (x *DeleteRequest) GetKey() []byte { @@ -500,21 +734,18 @@ func (x *DeleteRequest) GetKey() []byte { } type DeleteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` unknownFields protoimpl.UnknownFields - - CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DeleteResponse) Reset() { *x = DeleteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeleteResponse) String() string { @@ -524,8 +755,8 @@ func (x *DeleteResponse) String() string { func (*DeleteResponse) ProtoMessage() {} func (x *DeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[14] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -537,7 +768,7 @@ func (x *DeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. 
func (*DeleteResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{9} + return file_service_proto_rawDescGZIP(), []int{14} } func (x *DeleteResponse) GetCommitIndex() uint64 { @@ -555,20 +786,17 @@ func (x *DeleteResponse) GetSuccess() bool { } type GetRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetRequest) Reset() { *x = GetRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetRequest) String() string { @@ -578,8 +806,8 @@ func (x *GetRequest) String() string { func (*GetRequest) ProtoMessage() {} func (x *GetRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[15] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -591,7 +819,7 @@ func (x *GetRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. func (*GetRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{10} + return file_service_proto_rawDescGZIP(), []int{15} } func (x *GetRequest) GetKey() []byte { @@ -602,21 +830,18 @@ func (x *GetRequest) GetKey() []byte { } type GetResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ReadAtIndex uint64 `protobuf:"varint,1,opt,name=read_at_index,json=readAtIndex,proto3" json:"read_at_index,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - ReadAtIndex uint64 `protobuf:"varint,1,opt,name=read_at_index,json=readAtIndex,proto3" json:"read_at_index,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetResponse) Reset() { *x = GetResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetResponse) String() string { @@ -626,8 +851,8 @@ func (x *GetResponse) String() string { func (*GetResponse) ProtoMessage() {} func (x *GetResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[16] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -639,7 +864,7 @@ func (x *GetResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. 
func (*GetResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{11} + return file_service_proto_rawDescGZIP(), []int{16} } func (x *GetResponse) GetReadAtIndex() uint64 { @@ -657,21 +882,18 @@ func (x *GetResponse) GetValue() []byte { } type KeyError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Retryable bool `protobuf:"varint,2,opt,name=retryable,proto3" json:"retryable,omitempty"` unknownFields protoimpl.UnknownFields - - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - Retryable bool `protobuf:"varint,2,opt,name=retryable,proto3" json:"retryable,omitempty"` + sizeCache protoimpl.SizeCache } func (x *KeyError) Reset() { *x = KeyError{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *KeyError) String() string { @@ -681,8 +903,8 @@ func (x *KeyError) String() string { func (*KeyError) ProtoMessage() {} func (x *KeyError) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[17] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -694,7 +916,7 @@ func (x *KeyError) ProtoReflect() protoreflect.Message { // Deprecated: Use KeyError.ProtoReflect.Descriptor instead. func (*KeyError) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{12} + return file_service_proto_rawDescGZIP(), []int{17} } func (x *KeyError) GetMessage() string { @@ -712,22 +934,19 @@ func (x *KeyError) GetRetryable() bool { } type Kv struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Error *KeyError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Error *KeyError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Kv) Reset() { *x = Kv{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Kv) String() string { @@ -737,8 +956,8 @@ func (x *Kv) String() string { func (*Kv) ProtoMessage() {} func (x *Kv) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[18] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -750,7 +969,7 @@ func (x *Kv) ProtoReflect() protoreflect.Message { // Deprecated: 
Use Kv.ProtoReflect.Descriptor instead. func (*Kv) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{13} + return file_service_proto_rawDescGZIP(), []int{18} } func (x *Kv) GetError() *KeyError { @@ -775,22 +994,19 @@ func (x *Kv) GetValue() []byte { } type ScanRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + StartKey []byte `protobuf:"bytes,1,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + EndKey []byte `protobuf:"bytes,2,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` + Limit uint64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` unknownFields protoimpl.UnknownFields - - StartKey []byte `protobuf:"bytes,1,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` - EndKey []byte `protobuf:"bytes,2,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` - Limit uint64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ScanRequest) Reset() { *x = ScanRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ScanRequest) String() string { @@ -800,8 +1016,8 @@ func (x *ScanRequest) String() string { func (*ScanRequest) ProtoMessage() {} func (x *ScanRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[19] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -813,7 +1029,7 @@ func (x *ScanRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ScanRequest.ProtoReflect.Descriptor instead. 
func (*ScanRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{14} + return file_service_proto_rawDescGZIP(), []int{19} } func (x *ScanRequest) GetStartKey() []byte { @@ -838,20 +1054,17 @@ func (x *ScanRequest) GetLimit() uint64 { } type ScanResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Kv []*Kv `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` unknownFields protoimpl.UnknownFields - - Kv []*Kv `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ScanResponse) Reset() { *x = ScanResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ScanResponse) String() string { @@ -861,8 +1074,8 @@ func (x *ScanResponse) String() string { func (*ScanResponse) ProtoMessage() {} func (x *ScanResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[20] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -874,7 +1087,7 @@ func (x *ScanResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ScanResponse.ProtoReflect.Descriptor instead. func (*ScanResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{15} + return file_service_proto_rawDescGZIP(), []int{20} } func (x *ScanResponse) GetKv() []*Kv { @@ -885,25 +1098,22 @@ func (x *ScanResponse) GetKv() []*Kv { } type PreWriteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // mutations is the list of mutations to apply atomically. Mutations []*Kv `protobuf:"bytes,2,rep,name=mutations,proto3" json:"mutations,omitempty"` // start_ts is the timestamp of the transaction. StartTs uint64 `protobuf:"varint,3,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` // lock_ttl is the TTL of the lock in milliseconds. 
- LockTtl uint64 `protobuf:"varint,4,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` + LockTtl uint64 `protobuf:"varint,4,opt,name=lock_ttl,json=lockTtl,proto3" json:"lock_ttl,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PreWriteRequest) Reset() { *x = PreWriteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PreWriteRequest) String() string { @@ -913,8 +1123,8 @@ func (x *PreWriteRequest) String() string { func (*PreWriteRequest) ProtoMessage() {} func (x *PreWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[21] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -926,7 +1136,7 @@ func (x *PreWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PreWriteRequest.ProtoReflect.Descriptor instead. func (*PreWriteRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{16} + return file_service_proto_rawDescGZIP(), []int{21} } func (x *PreWriteRequest) GetMutations() []*Kv { @@ -951,20 +1161,17 @@ func (x *PreWriteRequest) GetLockTtl() uint64 { } type PreCommitResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Errors []*KeyError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` unknownFields protoimpl.UnknownFields - - Errors []*KeyError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PreCommitResponse) Reset() { *x = PreCommitResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PreCommitResponse) String() string { @@ -974,8 +1181,8 @@ func (x *PreCommitResponse) String() string { func (*PreCommitResponse) ProtoMessage() {} func (x *PreCommitResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[22] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -987,7 +1194,7 @@ func (x *PreCommitResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PreCommitResponse.ProtoReflect.Descriptor instead. func (*PreCommitResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{17} + return file_service_proto_rawDescGZIP(), []int{22} } func (x *PreCommitResponse) GetErrors() []*KeyError { @@ -998,22 +1205,19 @@ func (x *PreCommitResponse) GetErrors() []*KeyError { } type CommitRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // start_ts is the timestamp of prewrite request. 
- StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` - Keys [][]byte `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` + Keys [][]byte `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CommitRequest) Reset() { *x = CommitRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CommitRequest) String() string { @@ -1023,8 +1227,8 @@ func (x *CommitRequest) String() string { func (*CommitRequest) ProtoMessage() {} func (x *CommitRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[23] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1036,7 +1240,7 @@ func (x *CommitRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CommitRequest.ProtoReflect.Descriptor instead. func (*CommitRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{18} + return file_service_proto_rawDescGZIP(), []int{23} } func (x *CommitRequest) GetStartTs() uint64 { @@ -1054,22 +1258,19 @@ func (x *CommitRequest) GetKeys() [][]byte { } type CommitResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` + Errors []*KeyError `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` unknownFields protoimpl.UnknownFields - - CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` - Errors []*KeyError `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CommitResponse) Reset() { *x = CommitResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CommitResponse) String() string { @@ -1079,8 +1280,8 @@ func (x *CommitResponse) String() string { func (*CommitResponse) ProtoMessage() {} func (x *CommitResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[24] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1092,7 +1293,7 @@ func (x *CommitResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CommitResponse.ProtoReflect.Descriptor instead. 
func (*CommitResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{19} + return file_service_proto_rawDescGZIP(), []int{24} } func (x *CommitResponse) GetCommitIndex() uint64 { @@ -1117,20 +1318,17 @@ func (x *CommitResponse) GetErrors() []*KeyError { } type RollbackRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` unknownFields protoimpl.UnknownFields - - StartTs uint64 `protobuf:"varint,1,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RollbackRequest) Reset() { *x = RollbackRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RollbackRequest) String() string { @@ -1140,8 +1338,8 @@ func (x *RollbackRequest) String() string { func (*RollbackRequest) ProtoMessage() {} func (x *RollbackRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[25] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1153,7 +1351,7 @@ func (x *RollbackRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead. func (*RollbackRequest) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{20} + return file_service_proto_rawDescGZIP(), []int{25} } func (x *RollbackRequest) GetStartTs() uint64 { @@ -1164,20 +1362,17 @@ func (x *RollbackRequest) GetStartTs() uint64 { } type RollbackResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` unknownFields protoimpl.UnknownFields - - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RollbackResponse) Reset() { *x = RollbackResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RollbackResponse) String() string { @@ -1187,8 +1382,8 @@ func (x *RollbackResponse) String() string { func (*RollbackResponse) ProtoMessage() {} func (x *RollbackResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_service_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_service_proto_msgTypes[26] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1200,7 +1395,7 @@ func (x *RollbackResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackResponse.ProtoReflect.Descriptor instead. 
func (*RollbackResponse) Descriptor() ([]byte, []int) { - return file_proto_service_proto_rawDescGZIP(), []int{21} + return file_service_proto_rawDescGZIP(), []int{26} } func (x *RollbackResponse) GetSuccess() bool { @@ -1210,493 +1405,207 @@ func (x *RollbackResponse) GetSuccess() bool { return false } -var File_proto_service_proto protoreflect.FileDescriptor - -var file_proto_service_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x37, 0x0a, 0x0d, 0x52, 0x61, 0x77, 0x50, 0x75, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, - 0x0a, 0x0e, 0x52, 0x61, 0x77, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x31, 0x0a, - 0x0d, 0x52, 0x61, 0x77, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, - 0x22, 0x4a, 0x0a, 0x0e, 0x52, 0x61, 0x77, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x41, - 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x24, 0x0a, 0x10, - 0x52, 0x61, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4a, 0x0a, 0x0b, 0x50, 0x75, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, 0x07, - 
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x21, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4d, 0x0a, 0x0e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x1e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x47, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x5f, - 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x72, 0x65, 0x61, 0x64, 0x41, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x42, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0x0a, 0x02, 0x4b, 0x76, 0x12, 0x1f, 0x0a, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x4b, 0x65, 0x79, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x0b, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, - 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, - 0x23, 0x0a, 0x0c, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x13, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x03, 0x2e, 0x4b, 0x76, - 0x52, 0x02, 0x6b, 0x76, 0x22, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x03, 0x2e, 0x4b, 0x76, 0x52, - 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 
0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x54, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x74, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x74, 0x6c, - 0x22, 0x36, 0x0a, 0x11, 0x50, 0x72, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x4b, 0x65, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x3e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x70, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x4b, 0x65, 0x79, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x2c, 0x0a, 0x0f, 0x52, 0x6f, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, - 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x73, 0x22, 0x2c, 0x0a, 0x10, 0x52, 0x6f, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x32, 0x97, 0x01, 0x0a, 0x05, 0x52, 0x61, 0x77, 0x4b, 0x56, - 0x12, 0x2b, 0x0a, 0x06, 0x52, 0x61, 0x77, 0x50, 0x75, 0x74, 0x12, 0x0e, 0x2e, 0x52, 0x61, 0x77, - 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x52, 0x61, 0x77, - 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x2b, 0x0a, - 0x06, 0x52, 0x61, 0x77, 0x47, 0x65, 0x74, 0x12, 0x0e, 0x2e, 0x52, 0x61, 0x77, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x52, 0x61, 0x77, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x09, 0x52, 0x61, - 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x11, 0x2e, 0x52, 0x61, 0x77, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x52, 0x61, 0x77, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x32, 0xc1, 0x02, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x4b, 0x56, 0x12, 0x22, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x0b, 0x2e, 0x50, 0x75, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 
0x12, 0x22, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, - 0x0b, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x06, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x0e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x25, 0x0a, 0x04, 0x53, 0x63, 0x61, - 0x6e, 0x12, 0x0c, 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x0d, 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x32, 0x0a, 0x08, 0x50, 0x72, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x10, 0x2e, 0x50, - 0x72, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, - 0x2e, 0x50, 0x72, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x0e, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x31, 0x0a, 0x08, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x10, 0x2e, - 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x11, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x42, 0x23, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x6a, 0x70, 0x2f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, - 0x63, 0x6b, 0x76, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +var File_service_proto protoreflect.FileDescriptor + +const file_service_proto_rawDesc = "" + + "\n" + + "\rservice.proto\"7\n" + + "\rRawPutRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"M\n" + + "\x0eRawPutResponse\x12!\n" + + "\fcommit_index\x18\x01 \x01(\x04R\vcommitIndex\x12\x18\n" + + "\asuccess\x18\x02 \x01(\bR\asuccess\"1\n" + + "\rRawGetRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\x12\x0e\n" + + "\x02ts\x18\x03 \x01(\x04R\x02ts\"J\n" + + "\x0eRawGetResponse\x12\"\n" + + "\rread_at_index\x18\x01 \x01(\x04R\vreadAtIndex\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"$\n" + + "\x10RawDeleteRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\"P\n" + + "\x11RawDeleteResponse\x12!\n" + + "\fcommit_index\x18\x01 \x01(\x04R\vcommitIndex\x12\x18\n" + + "\asuccess\x18\x02 \x01(\bR\asuccess\",\n" + + "\x18RawLatestCommitTSRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\"C\n" + + "\x19RawLatestCommitTSResponse\x12\x0e\n" + + "\x02ts\x18\x01 \x01(\x04R\x02ts\x12\x16\n" + + "\x06exists\x18\x02 \x01(\bR\x06exists\"n\n" + + "\x10RawScanAtRequest\x12\x1b\n" + + "\tstart_key\x18\x01 \x01(\fR\bstartKey\x12\x17\n" + + "\aend_key\x18\x02 \x01(\fR\x06endKey\x12\x14\n" + + "\x05limit\x18\x03 \x01(\x03R\x05limit\x12\x0e\n" + + "\x02ts\x18\x04 \x01(\x04R\x02ts\"3\n" + + "\tRawKVPair\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"/\n" + + "\x11RawScanAtResponse\x12\x1a\n" + + "\x02kv\x18\x01 \x03(\v2\n" + + ".RawKVPairR\x02kv\"4\n" + + "\n" + + "PutRequest\x12\x10\n" + + 
"\x03key\x18\x01 \x01(\fR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"J\n" + + "\vPutResponse\x12!\n" + + "\fcommit_index\x18\x01 \x01(\x04R\vcommitIndex\x12\x18\n" + + "\asuccess\x18\x02 \x01(\bR\asuccess\"!\n" + + "\rDeleteRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\"M\n" + + "\x0eDeleteResponse\x12!\n" + + "\fcommit_index\x18\x01 \x01(\x04R\vcommitIndex\x12\x18\n" + + "\asuccess\x18\x02 \x01(\bR\asuccess\"\x1e\n" + + "\n" + + "GetRequest\x12\x10\n" + + "\x03key\x18\x01 \x01(\fR\x03key\"G\n" + + "\vGetResponse\x12\"\n" + + "\rread_at_index\x18\x01 \x01(\x04R\vreadAtIndex\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value\"B\n" + + "\bKeyError\x12\x18\n" + + "\amessage\x18\x01 \x01(\tR\amessage\x12\x1c\n" + + "\tretryable\x18\x02 \x01(\bR\tretryable\"M\n" + + "\x02Kv\x12\x1f\n" + + "\x05error\x18\x01 \x01(\v2\t.KeyErrorR\x05error\x12\x10\n" + + "\x03key\x18\x02 \x01(\fR\x03key\x12\x14\n" + + "\x05value\x18\x03 \x01(\fR\x05value\"Y\n" + + "\vScanRequest\x12\x1b\n" + + "\tstart_key\x18\x01 \x01(\fR\bstartKey\x12\x17\n" + + "\aend_key\x18\x02 \x01(\fR\x06endKey\x12\x14\n" + + "\x05limit\x18\x03 \x01(\x04R\x05limit\"#\n" + + "\fScanResponse\x12\x13\n" + + "\x02kv\x18\x01 \x03(\v2\x03.KvR\x02kv\"j\n" + + "\x0fPreWriteRequest\x12!\n" + + "\tmutations\x18\x02 \x03(\v2\x03.KvR\tmutations\x12\x19\n" + + "\bstart_ts\x18\x03 \x01(\x04R\astartTs\x12\x19\n" + + "\block_ttl\x18\x04 \x01(\x04R\alockTtl\"6\n" + + "\x11PreCommitResponse\x12!\n" + + "\x06errors\x18\x01 \x03(\v2\t.KeyErrorR\x06errors\">\n" + + "\rCommitRequest\x12\x19\n" + + "\bstart_ts\x18\x01 \x01(\x04R\astartTs\x12\x12\n" + + "\x04keys\x18\x02 \x03(\fR\x04keys\"p\n" + + "\x0eCommitResponse\x12!\n" + + "\fcommit_index\x18\x01 \x01(\x04R\vcommitIndex\x12\x18\n" + + "\asuccess\x18\x02 \x01(\bR\asuccess\x12!\n" + + "\x06errors\x18\x03 \x03(\v2\t.KeyErrorR\x06errors\",\n" + + "\x0fRollbackRequest\x12\x19\n" + + "\bstart_ts\x18\x01 \x01(\x04R\astartTs\",\n" + + "\x10RollbackResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess2\x9b\x02\n" + + "\x05RawKV\x12+\n" + + "\x06RawPut\x12\x0e.RawPutRequest\x1a\x0f.RawPutResponse\"\x00\x12+\n" + + "\x06RawGet\x12\x0e.RawGetRequest\x1a\x0f.RawGetResponse\"\x00\x124\n" + + "\tRawDelete\x12\x11.RawDeleteRequest\x1a\x12.RawDeleteResponse\"\x00\x12L\n" + + "\x11RawLatestCommitTS\x12\x19.RawLatestCommitTSRequest\x1a\x1a.RawLatestCommitTSResponse\"\x00\x124\n" + + "\tRawScanAt\x12\x11.RawScanAtRequest\x1a\x12.RawScanAtResponse\"\x002\xc1\x02\n" + + "\x0fTransactionalKV\x12\"\n" + + "\x03Put\x12\v.PutRequest\x1a\f.PutResponse\"\x00\x12\"\n" + + "\x03Get\x12\v.GetRequest\x1a\f.GetResponse\"\x00\x12+\n" + + "\x06Delete\x12\x0e.DeleteRequest\x1a\x0f.DeleteResponse\"\x00\x12%\n" + + "\x04Scan\x12\f.ScanRequest\x1a\r.ScanResponse\"\x00\x122\n" + + "\bPreWrite\x12\x10.PreWriteRequest\x1a\x12.PreCommitResponse\"\x00\x12+\n" + + "\x06Commit\x12\x0e.CommitRequest\x1a\x0f.CommitResponse\"\x00\x121\n" + + "\bRollback\x12\x10.RollbackRequest\x1a\x11.RollbackResponse\"\x00B#Z!github.com/bootjp/elastickv/protob\x06proto3" var ( - file_proto_service_proto_rawDescOnce sync.Once - file_proto_service_proto_rawDescData = file_proto_service_proto_rawDesc + file_service_proto_rawDescOnce sync.Once + file_service_proto_rawDescData []byte ) -func file_proto_service_proto_rawDescGZIP() []byte { - file_proto_service_proto_rawDescOnce.Do(func() { - file_proto_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_service_proto_rawDescData) +func file_service_proto_rawDescGZIP() []byte { + 
file_service_proto_rawDescOnce.Do(func() { + file_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_service_proto_rawDesc), len(file_service_proto_rawDesc))) }) - return file_proto_service_proto_rawDescData -} - -var file_proto_service_proto_msgTypes = make([]protoimpl.MessageInfo, 22) -var file_proto_service_proto_goTypes = []any{ - (*RawPutRequest)(nil), // 0: RawPutRequest - (*RawPutResponse)(nil), // 1: RawPutResponse - (*RawGetRequest)(nil), // 2: RawGetRequest - (*RawGetResponse)(nil), // 3: RawGetResponse - (*RawDeleteRequest)(nil), // 4: RawDeleteRequest - (*RawDeleteResponse)(nil), // 5: RawDeleteResponse - (*PutRequest)(nil), // 6: PutRequest - (*PutResponse)(nil), // 7: PutResponse - (*DeleteRequest)(nil), // 8: DeleteRequest - (*DeleteResponse)(nil), // 9: DeleteResponse - (*GetRequest)(nil), // 10: GetRequest - (*GetResponse)(nil), // 11: GetResponse - (*KeyError)(nil), // 12: KeyError - (*Kv)(nil), // 13: Kv - (*ScanRequest)(nil), // 14: ScanRequest - (*ScanResponse)(nil), // 15: ScanResponse - (*PreWriteRequest)(nil), // 16: PreWriteRequest - (*PreCommitResponse)(nil), // 17: PreCommitResponse - (*CommitRequest)(nil), // 18: CommitRequest - (*CommitResponse)(nil), // 19: CommitResponse - (*RollbackRequest)(nil), // 20: RollbackRequest - (*RollbackResponse)(nil), // 21: RollbackResponse -} -var file_proto_service_proto_depIdxs = []int32{ - 12, // 0: Kv.error:type_name -> KeyError - 13, // 1: ScanResponse.kv:type_name -> Kv - 13, // 2: PreWriteRequest.mutations:type_name -> Kv - 12, // 3: PreCommitResponse.errors:type_name -> KeyError - 12, // 4: CommitResponse.errors:type_name -> KeyError - 0, // 5: RawKV.RawPut:input_type -> RawPutRequest - 2, // 6: RawKV.RawGet:input_type -> RawGetRequest - 4, // 7: RawKV.RawDelete:input_type -> RawDeleteRequest - 6, // 8: TransactionalKV.Put:input_type -> PutRequest - 10, // 9: TransactionalKV.Get:input_type -> GetRequest - 8, // 10: TransactionalKV.Delete:input_type -> DeleteRequest - 14, // 11: TransactionalKV.Scan:input_type -> ScanRequest - 16, // 12: TransactionalKV.PreWrite:input_type -> PreWriteRequest - 18, // 13: TransactionalKV.Commit:input_type -> CommitRequest - 20, // 14: TransactionalKV.Rollback:input_type -> RollbackRequest - 1, // 15: RawKV.RawPut:output_type -> RawPutResponse - 3, // 16: RawKV.RawGet:output_type -> RawGetResponse - 5, // 17: RawKV.RawDelete:output_type -> RawDeleteResponse - 7, // 18: TransactionalKV.Put:output_type -> PutResponse - 11, // 19: TransactionalKV.Get:output_type -> GetResponse - 9, // 20: TransactionalKV.Delete:output_type -> DeleteResponse - 15, // 21: TransactionalKV.Scan:output_type -> ScanResponse - 17, // 22: TransactionalKV.PreWrite:output_type -> PreCommitResponse - 19, // 23: TransactionalKV.Commit:output_type -> CommitResponse - 21, // 24: TransactionalKV.Rollback:output_type -> RollbackResponse - 15, // [15:25] is the sub-list for method output_type - 5, // [5:15] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_proto_service_proto_init() } -func file_proto_service_proto_init() { - if File_proto_service_proto != nil { + return file_service_proto_rawDescData +} + +var file_service_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_service_proto_goTypes = []any{ + (*RawPutRequest)(nil), // 0: RawPutRequest + (*RawPutResponse)(nil), // 1: RawPutResponse + 
(*RawGetRequest)(nil), // 2: RawGetRequest + (*RawGetResponse)(nil), // 3: RawGetResponse + (*RawDeleteRequest)(nil), // 4: RawDeleteRequest + (*RawDeleteResponse)(nil), // 5: RawDeleteResponse + (*RawLatestCommitTSRequest)(nil), // 6: RawLatestCommitTSRequest + (*RawLatestCommitTSResponse)(nil), // 7: RawLatestCommitTSResponse + (*RawScanAtRequest)(nil), // 8: RawScanAtRequest + (*RawKVPair)(nil), // 9: RawKVPair + (*RawScanAtResponse)(nil), // 10: RawScanAtResponse + (*PutRequest)(nil), // 11: PutRequest + (*PutResponse)(nil), // 12: PutResponse + (*DeleteRequest)(nil), // 13: DeleteRequest + (*DeleteResponse)(nil), // 14: DeleteResponse + (*GetRequest)(nil), // 15: GetRequest + (*GetResponse)(nil), // 16: GetResponse + (*KeyError)(nil), // 17: KeyError + (*Kv)(nil), // 18: Kv + (*ScanRequest)(nil), // 19: ScanRequest + (*ScanResponse)(nil), // 20: ScanResponse + (*PreWriteRequest)(nil), // 21: PreWriteRequest + (*PreCommitResponse)(nil), // 22: PreCommitResponse + (*CommitRequest)(nil), // 23: CommitRequest + (*CommitResponse)(nil), // 24: CommitResponse + (*RollbackRequest)(nil), // 25: RollbackRequest + (*RollbackResponse)(nil), // 26: RollbackResponse +} +var file_service_proto_depIdxs = []int32{ + 9, // 0: RawScanAtResponse.kv:type_name -> RawKVPair + 17, // 1: Kv.error:type_name -> KeyError + 18, // 2: ScanResponse.kv:type_name -> Kv + 18, // 3: PreWriteRequest.mutations:type_name -> Kv + 17, // 4: PreCommitResponse.errors:type_name -> KeyError + 17, // 5: CommitResponse.errors:type_name -> KeyError + 0, // 6: RawKV.RawPut:input_type -> RawPutRequest + 2, // 7: RawKV.RawGet:input_type -> RawGetRequest + 4, // 8: RawKV.RawDelete:input_type -> RawDeleteRequest + 6, // 9: RawKV.RawLatestCommitTS:input_type -> RawLatestCommitTSRequest + 8, // 10: RawKV.RawScanAt:input_type -> RawScanAtRequest + 11, // 11: TransactionalKV.Put:input_type -> PutRequest + 15, // 12: TransactionalKV.Get:input_type -> GetRequest + 13, // 13: TransactionalKV.Delete:input_type -> DeleteRequest + 19, // 14: TransactionalKV.Scan:input_type -> ScanRequest + 21, // 15: TransactionalKV.PreWrite:input_type -> PreWriteRequest + 23, // 16: TransactionalKV.Commit:input_type -> CommitRequest + 25, // 17: TransactionalKV.Rollback:input_type -> RollbackRequest + 1, // 18: RawKV.RawPut:output_type -> RawPutResponse + 3, // 19: RawKV.RawGet:output_type -> RawGetResponse + 5, // 20: RawKV.RawDelete:output_type -> RawDeleteResponse + 7, // 21: RawKV.RawLatestCommitTS:output_type -> RawLatestCommitTSResponse + 10, // 22: RawKV.RawScanAt:output_type -> RawScanAtResponse + 12, // 23: TransactionalKV.Put:output_type -> PutResponse + 16, // 24: TransactionalKV.Get:output_type -> GetResponse + 14, // 25: TransactionalKV.Delete:output_type -> DeleteResponse + 20, // 26: TransactionalKV.Scan:output_type -> ScanResponse + 22, // 27: TransactionalKV.PreWrite:output_type -> PreCommitResponse + 24, // 28: TransactionalKV.Commit:output_type -> CommitResponse + 26, // 29: TransactionalKV.Rollback:output_type -> RollbackResponse + 18, // [18:30] is the sub-list for method output_type + 6, // [6:18] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_service_proto_init() } +func file_service_proto_init() { + if File_service_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_proto_service_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := 
v.(*RawPutRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*RawPutResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*RawGetRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*RawGetResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*RawDeleteRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*RawDeleteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*PutRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*PutResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*DeleteRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*DeleteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*GetRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*GetResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*KeyError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*Kv); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*ScanRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_proto_service_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*ScanResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*PreWriteRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*PreCommitResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*CommitRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*CommitResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*RollbackRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_service_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*RollbackResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_proto_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_service_proto_rawDesc), len(file_service_proto_rawDesc)), NumEnums: 0, - NumMessages: 22, + NumMessages: 27, NumExtensions: 0, NumServices: 2, }, - GoTypes: file_proto_service_proto_goTypes, - DependencyIndexes: file_proto_service_proto_depIdxs, - MessageInfos: file_proto_service_proto_msgTypes, + GoTypes: file_service_proto_goTypes, + DependencyIndexes: file_service_proto_depIdxs, + MessageInfos: file_service_proto_msgTypes, }.Build() - File_proto_service_proto = out.File - file_proto_service_proto_rawDesc = nil - file_proto_service_proto_goTypes = nil - file_proto_service_proto_depIdxs = nil + File_service_proto = out.File + file_service_proto_goTypes = nil + file_service_proto_depIdxs = nil } diff --git a/proto/service.proto b/proto/service.proto index 54c0a7d0..2e52a778 100644 --- a/proto/service.proto +++ b/proto/service.proto @@ -6,6 +6,8 @@ service RawKV { rpc RawPut(RawPutRequest) returns (RawPutResponse) {} rpc RawGet(RawGetRequest) returns (RawGetResponse) {} rpc RawDelete(RawDeleteRequest) returns (RawDeleteResponse) {} + rpc RawLatestCommitTS(RawLatestCommitTSRequest) returns (RawLatestCommitTSResponse) {} + rpc RawScanAt(RawScanAtRequest) returns (RawScanAtResponse) {} } service TransactionalKV { @@ -48,6 +50,31 @@ message RawDeleteResponse { bool success = 2; } +message RawLatestCommitTSRequest { + bytes key = 1; +} + +message RawLatestCommitTSResponse { + uint64 ts = 1; + bool exists = 2; +} + +message RawScanAtRequest { + bytes start_key = 1; + bytes end_key = 2; + int64 limit = 3; // validated against host int size; large values may be rejected + uint64 ts = 4; // optional read 
timestamp; if zero, server uses current HLC +} + +message RawKVPair { + bytes key = 1; + bytes value = 2; +} + +message RawScanAtResponse { + repeated RawKVPair kv = 1; +} + message PutRequest { bytes key = 1; bytes value = 2; diff --git a/proto/service_grpc.pb.go b/proto/service_grpc.pb.go index fa66ebaf..631c8814 100644 --- a/proto/service_grpc.pb.go +++ b/proto/service_grpc.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v5.29.3 -// source: proto/service.proto +// - protoc-gen-go-grpc v1.6.1 +// - protoc v3.21.12 +// source: service.proto package proto @@ -15,13 +15,15 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( - RawKV_RawPut_FullMethodName = "/RawKV/RawPut" - RawKV_RawGet_FullMethodName = "/RawKV/RawGet" - RawKV_RawDelete_FullMethodName = "/RawKV/RawDelete" + RawKV_RawPut_FullMethodName = "/RawKV/RawPut" + RawKV_RawGet_FullMethodName = "/RawKV/RawGet" + RawKV_RawDelete_FullMethodName = "/RawKV/RawDelete" + RawKV_RawLatestCommitTS_FullMethodName = "/RawKV/RawLatestCommitTS" + RawKV_RawScanAt_FullMethodName = "/RawKV/RawScanAt" ) // RawKVClient is the client API for RawKV service. @@ -31,6 +33,8 @@ type RawKVClient interface { RawPut(ctx context.Context, in *RawPutRequest, opts ...grpc.CallOption) (*RawPutResponse, error) RawGet(ctx context.Context, in *RawGetRequest, opts ...grpc.CallOption) (*RawGetResponse, error) RawDelete(ctx context.Context, in *RawDeleteRequest, opts ...grpc.CallOption) (*RawDeleteResponse, error) + RawLatestCommitTS(ctx context.Context, in *RawLatestCommitTSRequest, opts ...grpc.CallOption) (*RawLatestCommitTSResponse, error) + RawScanAt(ctx context.Context, in *RawScanAtRequest, opts ...grpc.CallOption) (*RawScanAtResponse, error) } type rawKVClient struct { @@ -71,30 +75,62 @@ func (c *rawKVClient) RawDelete(ctx context.Context, in *RawDeleteRequest, opts return out, nil } +func (c *rawKVClient) RawLatestCommitTS(ctx context.Context, in *RawLatestCommitTSRequest, opts ...grpc.CallOption) (*RawLatestCommitTSResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawLatestCommitTSResponse) + err := c.cc.Invoke(ctx, RawKV_RawLatestCommitTS_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *rawKVClient) RawScanAt(ctx context.Context, in *RawScanAtRequest, opts ...grpc.CallOption) (*RawScanAtResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RawScanAtResponse) + err := c.cc.Invoke(ctx, RawKV_RawScanAt_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // RawKVServer is the server API for RawKV service. // All implementations must embed UnimplementedRawKVServer -// for forward compatibility +// for forward compatibility. 
type RawKVServer interface { RawPut(context.Context, *RawPutRequest) (*RawPutResponse, error) RawGet(context.Context, *RawGetRequest) (*RawGetResponse, error) RawDelete(context.Context, *RawDeleteRequest) (*RawDeleteResponse, error) + RawLatestCommitTS(context.Context, *RawLatestCommitTSRequest) (*RawLatestCommitTSResponse, error) + RawScanAt(context.Context, *RawScanAtRequest) (*RawScanAtResponse, error) mustEmbedUnimplementedRawKVServer() } -// UnimplementedRawKVServer must be embedded to have forward compatible implementations. -type UnimplementedRawKVServer struct { -} +// UnimplementedRawKVServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRawKVServer struct{} func (UnimplementedRawKVServer) RawPut(context.Context, *RawPutRequest) (*RawPutResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RawPut not implemented") + return nil, status.Error(codes.Unimplemented, "method RawPut not implemented") } func (UnimplementedRawKVServer) RawGet(context.Context, *RawGetRequest) (*RawGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RawGet not implemented") + return nil, status.Error(codes.Unimplemented, "method RawGet not implemented") } func (UnimplementedRawKVServer) RawDelete(context.Context, *RawDeleteRequest) (*RawDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RawDelete not implemented") + return nil, status.Error(codes.Unimplemented, "method RawDelete not implemented") +} +func (UnimplementedRawKVServer) RawLatestCommitTS(context.Context, *RawLatestCommitTSRequest) (*RawLatestCommitTSResponse, error) { + return nil, status.Error(codes.Unimplemented, "method RawLatestCommitTS not implemented") +} +func (UnimplementedRawKVServer) RawScanAt(context.Context, *RawScanAtRequest) (*RawScanAtResponse, error) { + return nil, status.Error(codes.Unimplemented, "method RawScanAt not implemented") } func (UnimplementedRawKVServer) mustEmbedUnimplementedRawKVServer() {} +func (UnimplementedRawKVServer) testEmbeddedByValue() {} // UnsafeRawKVServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to RawKVServer will @@ -104,6 +140,13 @@ type UnsafeRawKVServer interface { } func RegisterRawKVServer(s grpc.ServiceRegistrar, srv RawKVServer) { + // If the following call panics, it indicates UnimplementedRawKVServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&RawKV_ServiceDesc, srv) } @@ -161,6 +204,42 @@ func _RawKV_RawDelete_Handler(srv interface{}, ctx context.Context, dec func(int return interceptor(ctx, in, info, handler) } +func _RawKV_RawLatestCommitTS_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawLatestCommitTSRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RawKVServer).RawLatestCommitTS(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RawKV_RawLatestCommitTS_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RawKVServer).RawLatestCommitTS(ctx, req.(*RawLatestCommitTSRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RawKV_RawScanAt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RawScanAtRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RawKVServer).RawScanAt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RawKV_RawScanAt_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RawKVServer).RawScanAt(ctx, req.(*RawScanAtRequest)) + } + return interceptor(ctx, in, info, handler) +} + // RawKV_ServiceDesc is the grpc.ServiceDesc for RawKV service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -180,9 +259,17 @@ var RawKV_ServiceDesc = grpc.ServiceDesc{ MethodName: "RawDelete", Handler: _RawKV_RawDelete_Handler, }, + { + MethodName: "RawLatestCommitTS", + Handler: _RawKV_RawLatestCommitTS_Handler, + }, + { + MethodName: "RawScanAt", + Handler: _RawKV_RawScanAt_Handler, + }, }, Streams: []grpc.StreamDesc{}, - Metadata: "proto/service.proto", + Metadata: "service.proto", } const ( @@ -288,7 +375,7 @@ func (c *transactionalKVClient) Rollback(ctx context.Context, in *RollbackReques // TransactionalKVServer is the server API for TransactionalKV service. // All implementations must embed UnimplementedTransactionalKVServer -// for forward compatibility +// for forward compatibility. type TransactionalKVServer interface { Put(context.Context, *PutRequest) (*PutResponse, error) Get(context.Context, *GetRequest) (*GetResponse, error) @@ -300,32 +387,36 @@ type TransactionalKVServer interface { mustEmbedUnimplementedTransactionalKVServer() } -// UnimplementedTransactionalKVServer must be embedded to have forward compatible implementations. -type UnimplementedTransactionalKVServer struct { -} +// UnimplementedTransactionalKVServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedTransactionalKVServer struct{} func (UnimplementedTransactionalKVServer) Put(context.Context, *PutRequest) (*PutResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") + return nil, status.Error(codes.Unimplemented, "method Put not implemented") } func (UnimplementedTransactionalKVServer) Get(context.Context, *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") + return nil, status.Error(codes.Unimplemented, "method Get not implemented") } func (UnimplementedTransactionalKVServer) Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") + return nil, status.Error(codes.Unimplemented, "method Delete not implemented") } func (UnimplementedTransactionalKVServer) Scan(context.Context, *ScanRequest) (*ScanResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Scan not implemented") + return nil, status.Error(codes.Unimplemented, "method Scan not implemented") } func (UnimplementedTransactionalKVServer) PreWrite(context.Context, *PreWriteRequest) (*PreCommitResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PreWrite not implemented") + return nil, status.Error(codes.Unimplemented, "method PreWrite not implemented") } func (UnimplementedTransactionalKVServer) Commit(context.Context, *CommitRequest) (*CommitResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") + return nil, status.Error(codes.Unimplemented, "method Commit not implemented") } func (UnimplementedTransactionalKVServer) Rollback(context.Context, *RollbackRequest) (*RollbackResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Rollback not implemented") + return nil, status.Error(codes.Unimplemented, "method Rollback not implemented") } func (UnimplementedTransactionalKVServer) mustEmbedUnimplementedTransactionalKVServer() {} +func (UnimplementedTransactionalKVServer) testEmbeddedByValue() {} // UnsafeTransactionalKVServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to TransactionalKVServer will @@ -335,6 +426,13 @@ type UnsafeTransactionalKVServer interface { } func RegisterTransactionalKVServer(s grpc.ServiceRegistrar, srv TransactionalKVServer) { + // If the following call panics, it indicates UnimplementedTransactionalKVServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&TransactionalKV_ServiceDesc, srv) } @@ -501,5 +599,5 @@ var TransactionalKV_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "proto/service.proto", + Metadata: "service.proto", } diff --git a/shard_config.go b/shard_config.go new file mode 100644 index 00000000..5716a64f --- /dev/null +++ b/shard_config.go @@ -0,0 +1,165 @@ +package main + +import ( + "bytes" + "strconv" + "strings" + + "github.com/cockroachdb/errors" + "github.com/hashicorp/raft" +) + +type groupSpec struct { + id uint64 + address string +} + +type rangeSpec struct { + start []byte + end []byte + groupID uint64 +} + +const splitParts = 2 + +var ( + ErrAddressRequired = errors.New("address is required") + ErrNoRaftGroupsConfigured = errors.New("no raft groups configured") + ErrNoShardRangesConfigured = errors.New("no shard ranges configured") + + ErrInvalidRaftGroupsEntry = errors.New("invalid raftGroups entry") + ErrInvalidShardRangesEntry = errors.New("invalid shardRanges entry") + ErrInvalidRaftRedisMapEntry = errors.New("invalid raftRedisMap entry") +) + +func parseRaftGroups(raw, defaultAddr string) ([]groupSpec, error) { + if raw == "" { + if defaultAddr == "" { + return nil, errors.WithStack(ErrAddressRequired) + } + return []groupSpec{{id: 1, address: defaultAddr}}, nil + } + parts := strings.Split(raw, ",") + groups := make([]groupSpec, 0, len(parts)) + seen := map[uint64]struct{}{} + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + kv := strings.SplitN(part, "=", splitParts) + if len(kv) != splitParts { + return nil, errors.Wrapf(ErrInvalidRaftGroupsEntry, "%q", part) + } + idRaw := strings.TrimSpace(kv[0]) + id, err := strconv.ParseUint(idRaw, 10, 64) + if err != nil { + return nil, errors.Wrapf(ErrInvalidRaftGroupsEntry, "invalid group id %q: %v", idRaw, err) + } + addr := strings.TrimSpace(kv[1]) + if addr == "" { + return nil, errors.Wrapf(ErrInvalidRaftGroupsEntry, "empty address for group %d", id) + } + if _, ok := seen[id]; ok { + return nil, errors.Wrapf(ErrInvalidRaftGroupsEntry, "duplicate group id %d", id) + } + seen[id] = struct{}{} + groups = append(groups, groupSpec{id: id, address: addr}) + } + if len(groups) == 0 { + return nil, errors.WithStack(ErrNoRaftGroupsConfigured) + } + return groups, nil +} + +func parseShardRanges(raw string, defaultGroup uint64) ([]rangeSpec, error) { + if raw == "" { + return []rangeSpec{{start: []byte(""), end: nil, groupID: defaultGroup}}, nil + } + parts := strings.Split(raw, ",") + ranges := make([]rangeSpec, 0, len(parts)) + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + kv := strings.SplitN(part, "=", splitParts) + if len(kv) != splitParts { + return nil, errors.Wrapf(ErrInvalidShardRangesEntry, "%q", part) + } + groupID, err := strconv.ParseUint(strings.TrimSpace(kv[1]), 10, 64) + if err != nil { + return nil, errors.Wrapf(ErrInvalidShardRangesEntry, "invalid group id in %q: %v", part, err) + } + rangePart := strings.TrimSpace(kv[0]) + bounds := strings.SplitN(rangePart, ":", splitParts) + if len(bounds) != splitParts { + return nil, errors.Wrapf(ErrInvalidShardRangesEntry, "invalid range %q (expected start:end)", rangePart) + } + // An empty start key represents the minimum key boundary. 
+ start := []byte(strings.TrimSpace(bounds[0])) + var end []byte + if endStr := strings.TrimSpace(bounds[1]); endStr != "" { + end = []byte(endStr) + if bytes.Compare(start, end) >= 0 { + return nil, errors.Wrapf(ErrInvalidShardRangesEntry, "invalid range %q (start must be < end)", rangePart) + } + } + ranges = append(ranges, rangeSpec{start: start, end: end, groupID: groupID}) + } + if len(ranges) == 0 { + return nil, errors.WithStack(ErrNoShardRangesConfigured) + } + return ranges, nil +} + +func parseRaftRedisMap(raw string) (map[raft.ServerAddress]string, error) { + out := make(map[raft.ServerAddress]string) + if raw == "" { + return out, nil + } + parts := strings.Split(raw, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + kv := strings.SplitN(part, "=", splitParts) + if len(kv) != splitParts { + return nil, errors.Wrapf(ErrInvalidRaftRedisMapEntry, "%q", part) + } + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + if k == "" || v == "" { + return nil, errors.Wrapf(ErrInvalidRaftRedisMapEntry, "%q", part) + } + out[raft.ServerAddress(k)] = v + } + return out, nil +} + +func defaultGroupID(groups []groupSpec) uint64 { + min := uint64(0) + for _, g := range groups { + if min == 0 || g.id < min { + min = g.id + } + } + if min == 0 { + return 1 + } + return min +} + +func validateShardRanges(ranges []rangeSpec, groups []groupSpec) error { + ids := map[uint64]struct{}{} + for _, g := range groups { + ids[g.id] = struct{}{} + } + for _, r := range ranges { + if _, ok := ids[r.groupID]; !ok { + return errors.WithStack(errors.Newf("shard range references unknown group %d", r.groupID)) + } + } + return nil +} diff --git a/shard_config_test.go b/shard_config_test.go new file mode 100644 index 00000000..2f22ffa6 --- /dev/null +++ b/shard_config_test.go @@ -0,0 +1,135 @@ +package main + +import ( + "testing" + + "github.com/hashicorp/raft" + "github.com/stretchr/testify/require" +) + +func TestParseRaftGroups(t *testing.T) { + t.Run("default address required", func(t *testing.T) { + _, err := parseRaftGroups("", "") + require.ErrorIs(t, err, ErrAddressRequired) + }) + + t.Run("default group from default address", func(t *testing.T) { + groups, err := parseRaftGroups("", "127.0.0.1:50051") + require.NoError(t, err) + require.Equal(t, []groupSpec{{id: 1, address: "127.0.0.1:50051"}}, groups) + }) + + t.Run("multiple groups", func(t *testing.T) { + groups, err := parseRaftGroups("1=127.0.0.1:50051, 2=127.0.0.1:50052", "") + require.NoError(t, err) + require.Equal(t, []groupSpec{ + {id: 1, address: "127.0.0.1:50051"}, + {id: 2, address: "127.0.0.1:50052"}, + }, groups) + }) + + t.Run("trims whitespace around id", func(t *testing.T) { + groups, err := parseRaftGroups("1 = 127.0.0.1:50051, 2=127.0.0.1:50052", "") + require.NoError(t, err) + require.Equal(t, []groupSpec{ + {id: 1, address: "127.0.0.1:50051"}, + {id: 2, address: "127.0.0.1:50052"}, + }, groups) + }) + + t.Run("invalid entry", func(t *testing.T) { + _, err := parseRaftGroups("nope", "127.0.0.1:50051") + require.ErrorIs(t, err, ErrInvalidRaftGroupsEntry) + }) + + t.Run("duplicate ids", func(t *testing.T) { + _, err := parseRaftGroups("1=a,1=b", "") + require.Error(t, err) + }) + + t.Run("empty after trimming", func(t *testing.T) { + _, err := parseRaftGroups(" , , ", "127.0.0.1:50051") + require.ErrorIs(t, err, ErrNoRaftGroupsConfigured) + }) +} + +func TestParseShardRanges(t *testing.T) { + t.Run("default range", func(t *testing.T) { + ranges, err := 
parseShardRanges("", 7) + require.NoError(t, err) + require.Len(t, ranges, 1) + require.Equal(t, []byte(""), ranges[0].start) + require.Nil(t, ranges[0].end) + require.Equal(t, uint64(7), ranges[0].groupID) + }) + + t.Run("multiple ranges", func(t *testing.T) { + ranges, err := parseShardRanges("a:m=1, m:=2", 1) + require.NoError(t, err) + require.Equal(t, []rangeSpec{ + {start: []byte("a"), end: []byte("m"), groupID: 1}, + {start: []byte("m"), end: nil, groupID: 2}, + }, ranges) + }) + + t.Run("trims whitespace", func(t *testing.T) { + ranges, err := parseShardRanges(" a : m = 1 , m : = 2 ", 1) + require.NoError(t, err) + require.Equal(t, []rangeSpec{ + {start: []byte("a"), end: []byte("m"), groupID: 1}, + {start: []byte("m"), end: nil, groupID: 2}, + }, ranges) + }) + + t.Run("invalid entry", func(t *testing.T) { + _, err := parseShardRanges("a=1", 1) + require.ErrorIs(t, err, ErrInvalidShardRangesEntry) + }) + + t.Run("empty after trimming", func(t *testing.T) { + _, err := parseShardRanges(" , , ", 1) + require.ErrorIs(t, err, ErrNoShardRangesConfigured) + }) +} + +func TestParseRaftRedisMap(t *testing.T) { + m, err := parseRaftRedisMap("a=b, c=d") + require.NoError(t, err) + require.Equal(t, map[raft.ServerAddress]string{ + "a": "b", + "c": "d", + }, m) + + t.Run("trims whitespace", func(t *testing.T) { + m, err := parseRaftRedisMap(" a = b , c = d ") + require.NoError(t, err) + require.Equal(t, map[raft.ServerAddress]string{ + "a": "b", + "c": "d", + }, m) + }) + + t.Run("invalid entry errors", func(t *testing.T) { + _, err := parseRaftRedisMap("a=b, nope") + require.ErrorIs(t, err, ErrInvalidRaftRedisMapEntry) + }) +} + +func TestDefaultGroupID(t *testing.T) { + require.Equal(t, uint64(1), defaultGroupID(nil)) + require.Equal(t, uint64(2), defaultGroupID([]groupSpec{{id: 3}, {id: 2}})) +} + +func TestValidateShardRanges(t *testing.T) { + groups := []groupSpec{{id: 1}, {id: 2}} + + t.Run("valid", func(t *testing.T) { + err := validateShardRanges([]rangeSpec{{groupID: 1}}, groups) + require.NoError(t, err) + }) + + t.Run("unknown group", func(t *testing.T) { + err := validateShardRanges([]rangeSpec{{groupID: 3}}, groups) + require.Error(t, err) + }) +}
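
A usage sketch for the two new RawKV RPCs, to make their intended composition concrete. This client is not part of the diff: the dial target, key range, and limit are invented for illustration. RawLatestCommitTS pins a commit timestamp for a key, and a RawScanAt at that fixed ts is a repeatable snapshot read; passing Ts of zero would instead read at the server's current HLC, per the comment in service.proto.

package main

import (
	"context"
	"fmt"
	"log"

	pb "github.com/bootjp/elastickv/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical endpoint; any node serving the RawKV gRPC service works.
	conn, err := grpc.NewClient("127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewRawKVClient(conn)
	ctx := context.Background()

	// Pin the snapshot to the newest commit visible for a key of interest.
	tsResp, err := client.RawLatestCommitTS(ctx,
		&pb.RawLatestCommitTSRequest{Key: []byte("a")})
	if err != nil {
		log.Fatal(err)
	}

	// Scan ["a", "m") as of that timestamp. Re-running this call with the
	// same Ts returns the same result even while writes continue.
	scanResp, err := client.RawScanAt(ctx, &pb.RawScanAtRequest{
		StartKey: []byte("a"),
		EndKey:   []byte("m"),
		Limit:    100,
		Ts:       tsResp.GetTs(),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range scanResp.GetKv() {
		fmt.Printf("%s => %s\n", kv.GetKey(), kv.GetValue())
	}
}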
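
Likewise, a sketch of how the shard_config.go helpers might be wired together. The flag values are invented and the real flag plumbing lives in main.go, which this diff does not show; this only demonstrates the parse-then-validate order the helpers assume.

package main // alongside shard_config.go; literal values below are invented

import (
	"log"

	"github.com/hashicorp/raft"
)

func loadShardTopologyExample() ([]rangeSpec, map[raft.ServerAddress]string) {
	// Two groups; the empty defaultAddr is fine because raftGroups is non-empty.
	groups, err := parseRaftGroups("1=127.0.0.1:50051,2=127.0.0.1:50052", "")
	if err != nil {
		log.Fatal(err)
	}

	// ":m=1" is ["", "m") on group 1; "m:=2" is ["m", +inf) on group 2.
	// An empty end key means the range is unbounded above.
	ranges, err := parseShardRanges(":m=1,m:=2", defaultGroupID(groups))
	if err != nil {
		log.Fatal(err)
	}
	if err := validateShardRanges(ranges, groups); err != nil {
		log.Fatal(err) // fires if a range names a group id not in raftGroups
	}

	// Pair each raft advertise address with that node's Redis listener.
	redisMap, err := parseRaftRedisMap(
		"127.0.0.1:50051=127.0.0.1:6379,127.0.0.1:50052=127.0.0.1:6380")
	if err != nil {
		log.Fatal(err)
	}
	return ranges, redisMap
}

The empty-boundary convention keeps the range grammar total: ":m" covers everything below "m" and "m:" everything from "m" up, so adjacent ranges tile the keyspace without needing sentinel keys.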